[Yt-svn] yt-commit r1296 - in trunk: . doc yt yt/extensions yt/extensions/lightcone yt/lagos yt/raven

britton at wrangler.dreamhost.com
Wed May 6 16:49:59 PDT 2009


Author: britton
Date: Wed May  6 16:49:57 2009
New Revision: 1296
URL: http://yt.spacepope.org/changeset/1296

Log:
Changing the HDF5 module from PyTables to h5py.  Thanks to Matt for making the
patch and helping to facilitate this.
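
For reference, the translation applied throughout this patch maps the PyTables calls onto their h5py equivalents roughly as follows (a minimal sketch; the file name and array below are made up for illustration):

    import numpy as np
    import h5py

    data = np.arange(8, dtype="float64")

    # PyTables: afile = tables.openFile("example.h5", "w")
    afile = h5py.File("example.h5", "w")          # illustrative file name

    # PyTables: md = afile.createGroup("/", "globalMetaData"); md._v_attrs.minTime = 0.0
    md = afile.create_group("/globalMetaData")
    md.attrs["minTime"] = 0.0

    # PyTables: afile.createArray(md, "sorted_times", data)
    md.create_dataset("sorted_times", data=data)

    afile.close()

    # Reading back: PyTables' node.read() becomes slicing the h5py Dataset.
    afile = h5py.File("example.h5", "r")
    times = afile["/globalMetaData/sorted_times"][...]
    afile.close()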


Modified:
   trunk/doc/install_script.sh
   trunk/doc/install_script_osx.sh
   trunk/setup.py
   trunk/yt/commands.py
   trunk/yt/extensions/HaloProfiler.py
   trunk/yt/extensions/HierarchySubset.py
   trunk/yt/extensions/lightcone/HaloMask.py
   trunk/yt/extensions/lightcone/LightCone.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/HaloFinding.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/ParallelTools.py
   trunk/yt/lagos/__init__.py
   trunk/yt/raven/FixedResolution.py

Modified: trunk/doc/install_script.sh
==============================================================================
--- trunk/doc/install_script.sh	(original)
+++ trunk/doc/install_script.sh	Wed May  6 16:49:57 2009
@@ -31,6 +31,7 @@
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with 
                 # the system zlib, which is compiled statically.
                 # If need be, you can turn this off.
+INST_HG=1       # Install Mercurial or not?
 
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
@@ -100,7 +101,7 @@
 get_enzotools numpy-1.2.1.tar.gz
 get_enzotools matplotlib-0.98.5.2.tar.gz
 get_enzotools ipython-0.9.1.tar.gz
-get_enzotools tables-2.1.tar.gz
+get_enzotools h5py-1.1.0.tar.gz
 
 if [ -z "$YT_DIR" ]
 then
@@ -151,6 +152,7 @@
         cd ..
     fi
     export HDF5_DIR=${DEST_DIR}
+    export HDF5_API=16
 fi
 
 if [ ! -e Python-2.6.1/done ]
@@ -193,7 +195,7 @@
 do_setup_py numpy-1.2.1 ${NUMPY_ARGS}
 do_setup_py matplotlib-0.98.5.2
 do_setup_py ipython-0.9.1
-do_setup_py tables-2.1 
+do_setup_py h5py-1.1.0
 
 echo "Doing yt update"
 MY_PWD=`pwd`
@@ -206,6 +208,12 @@
 touch done
 cd $MY_PWD
 
+if [ $INST_HG -eq 1 ]
+then
+    echo "Installing Mercurial."
+    ( ${DEST_DIR}/bin/easy_install-2.6 mercurial 2>&1 ) 1>> ${LOG_FILE} || do_exit
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -232,6 +240,13 @@
 echo "as the source for all the YT code.  This means you probably shouldn't"
 echo "delete it, but on the plus side, any changes you make there are"
 echo "automatically propagated."
+if [ $INST_HG -eq 1 ]
+then
+  echo "Mercurial has also been installed:"
+  echo
+  echo "$DEST_DIR/bin/hg"
+  echo
+fi
 echo
 echo "For support, see one of the following websites:"
 echo

Modified: trunk/doc/install_script_osx.sh
==============================================================================
--- trunk/doc/install_script_osx.sh	(original)
+++ trunk/doc/install_script_osx.sh	Wed May  6 16:49:57 2009
@@ -22,6 +22,7 @@
 # and install it on its own
 #HDF5_DIR=
 
+INST_HG=1       # Install Mercurial or not?
 # If you've got YT some other place, set this to point to it.
 YT_DIR=""
 
@@ -126,7 +127,7 @@
 get_enzotools numpy-1.2.1-py2.5-macosx10.5.dmg
 get_enzotools matplotlib-0.98.5.2-py2.5-mpkg.zip
 get_enzotools ipython-0.9.1.tar.gz
-get_enzotools tables-2.1.tar.gz
+get_enzotools h5py-1.1.0.tar.gz
 
 if [ -z "$YT_DIR" ]
 then
@@ -159,7 +160,8 @@
         touch done
         cd ..
     fi
-    HDF5_DIR=${DEST_DIR}
+    export HDF5_DIR=${DEST_DIR}
+    export HDF5_API=16
 fi
 
 [ ! -e ${DEST_DIR}/src/py_done ] && self_install \
@@ -186,7 +188,7 @@
 fi
 
 do_setup_py ipython-0.9.1
-do_setup_py tables-2.1 --hdf5=${HDF5_DIR}
+do_setup_py h5py-1.1.0
 
 echo "Doing yt update"
 MY_PWD=`pwd`
@@ -200,6 +202,12 @@
 touch done
 cd $MY_PWD
 
+if [ $INST_HG -eq 1 ]
+then
+    echo "Installing Mercurial."
+    ( ${DEST_DIR}/bin/easy_install-2.6 mercurial 2>&1 ) 1>> ${LOG_FILE} || do_exit
+fi
+
 echo
 echo
 echo "========================================================================"
@@ -224,6 +232,13 @@
 echo "as the source for all the YT code.  This means you probably shouldn't"
 echo "delete it, but on the plus side, any changes you make there are"
 echo "automatically propagated."
+if [ $INST_HG -eq 1 ]
+then
+  echo "Mercurial has also been installed:"
+  echo
+  echo "$DEST_DIR/bin/hg"
+  echo
+fi
 echo
 echo "For support, see one of the following websites:"
 echo

Modified: trunk/setup.py
==============================================================================
--- trunk/setup.py	(original)
+++ trunk/setup.py	Wed May  6 16:49:57 2009
@@ -54,7 +54,7 @@
         keywords='astronomy astrophysics visualization amr adaptivemeshrefinement',
         install_requires = ['matplotlib', 'numpy','ipython'],
         extras_require = { 'GUI' : ['wxPython'],
-                           'storage' : ['tables'], 
+                           'storage' : ['h5py'], 
                            'pdf' : ['pypdf']},
         entry_points = { 'console_scripts' : [
                             'yt = yt.commands:run_main',

Modified: trunk/yt/commands.py
==============================================================================
--- trunk/yt/commands.py	(original)
+++ trunk/yt/commands.py	Wed May  6 16:49:57 2009
@@ -326,21 +326,20 @@
         ${cmd_option_list}
         """
         from yt.extensions.HierarchySubset import ExtractedHierarchy
-        import tables
+        import h5py
 
         first = int(start)
         last = int(stop)
 
         # Set up our global metadata
-        afile = tables.openFile(opts.output, "w")
-        md = afile.createGroup("/", "globalMetaData")
-        mda = md._v_attrs
-        mda.datatype = 0
-        mda.staggering = 1
-        mda.fieldtype = 1
+        afile = h5py.File(opts.output, "w")
+        md = afile.create_group("/globalMetaData")
+        md.attrs['datatype'] = 0
+        md.attrs['staggering'] = 1
+        md.attrs['fieldtype'] = 1
 
-        mda.minTimeStep = first
-        mda.maxTimeStep = last
+        md.attrs['minTimeStep'] = first
+        md.attrs['maxTimeStep'] = last
 
         times = []
         # Get our staggering correct based on skip
@@ -365,15 +364,15 @@
             t2.append(pf["InitialTime"])
 
         # This should be the same
-        mda.rootDelta = (pf["unitary"]/pf["TopGridDimensions"]).astype('float64')
-        mda.minTime = times[0]
-        mda.maxTime = times[-1]
-        mda.numTimeSteps = len(timesteps)
+        md.attrs['rootDelta'] = (pf["unitary"]/pf["TopGridDimensions"]).astype('float64')
+        md.attrs['minTime'] = times[0]
+        md.attrs['maxTime'] = times[-1]
+        md.attrs['numTimeSteps'] = len(timesteps)
 
         # I think we just want one value here
         rel_times = na.array(times, dtype='float64') - int(opts.subtract_time)*times[0]
-        afile.createArray(md, "sorted_times", na.array(rel_times))
-        afile.createArray(md, "sorted_timesteps", timesteps)
+        md.create_dataset("sorted_times", data=na.array(rel_times))
+        md.create_dataset("sorted_timesteps", data=timesteps)
 
         afile.close()
         

Modified: trunk/yt/extensions/HaloProfiler.py
==============================================================================
--- trunk/yt/extensions/HaloProfiler.py	(original)
+++ trunk/yt/extensions/HaloProfiler.py	Wed May  6 16:49:57 2009
@@ -29,7 +29,7 @@
 import yt.raven as raven
 import numpy as na
 import os
-import tables as h5
+import h5py
 
 PROFILE_RADIUS_THRESHOLD = 2
 
@@ -275,13 +275,13 @@
                             (outputDir,halo['id'],axes[w])
                     mylog.info("Saving projection data to %s." % dataFilename)
 
-                    output = h5.openFile(dataFilename, "a")
+                    output = h5py.File(dataFilename, "a")
                     # Create fixed resolution buffer for each projection and write them out.
                     for e,field in enumerate(self.projectionFields.keys()):
                         frb = raven.FixedResolutionBuffer(pc.plots[e].data,(proj_left[0],proj_right[0],proj_left[1],proj_right[1]),
                                                           (projectionResolution,projectionResolution),
                                                           antialias=False)
-                        output.createArray("/",field,frb[field])
+                        output.create_dataset("/%s" % field,data=frb[field])
                     output.close()
 
                 if save_images:

Modified: trunk/yt/extensions/HierarchySubset.py
==============================================================================
--- trunk/yt/extensions/HierarchySubset.py	(original)
+++ trunk/yt/extensions/HierarchySubset.py	Wed May  6 16:49:57 2009
@@ -25,7 +25,7 @@
 
 
 from yt.mods import *
-import tables, os.path
+import h5py, os.path
 
 import yt.commands as commands
 
@@ -123,9 +123,10 @@
 
     def export_output(self, afile, n, field):
         # I prefer dict access, but tables doesn't.
-        time_node = afile.createGroup("/", "time-%s" % n)
-        time_node._v_attrs.time = self.pf["InitialTime"]
-        time_node._v_attrs.numLevels = self.pf.h.max_level+1-self.min_level
+        # But h5py does!
+        time_node = afile.create_group("/time-%s" % n)
+        time_node.attrs['time'] = self.pf["InitialTime"]
+        time_node.attrs['numLevels'] = self.pf.h.max_level+1-self.min_level
         # Can take a while, so let's get a progressbar
         self._export_all_levels(afile, time_node, field)
 
@@ -137,12 +138,12 @@
         pbar.finish()
 
     def export_level(self, afile, time_node, level, field, grids = None):
-        level_node = afile.createGroup(time_node, "level-%s" % level)
+        level_node = afile.create_group("%s/level-%s" % (time_node,level))
         # Grid objects on this level...
         if grids is None: grids = self.pf.h.select_grids(level+self.min_level)
-        level_node._v_attrs.delta = grids[0].dds*self.mult_factor
-        level_node._v_attrs.relativeRefinementFactor = na.array([2]*3, dtype='int32')
-        level_node._v_attrs.numGrids = len(grids)
+        level_node.attrs['delta'] = grids[0].dds*self.mult_factor
+        level_node.attrs['relativeRefinementFactor'] = na.array([2]*3, dtype='int32')
+        level_node.attrs['numGrids'] = len(grids)
         for i,g in enumerate(grids):
             self.export_grid(afile, level_node, g, i, field)
 
@@ -155,32 +156,32 @@
         return int_origin, level_int_origin, origin, dds
 
     def export_grid(self, afile, level_node, grid, i, field):
-        grid_node = afile.createGroup(level_node, "grid-%s" % i)
+        grid_node = afile.create_group("%s/grid-%s" % (level_node,i))
         int_origin, lint, origin, dds = self._convert_grid(grid)
-        grid_node._v_attrs.integerOrigin = int_origin
-        grid_node._v_attrs.origin = origin
-        grid_node._v_attrs.ghostzoneFlags = na.zeros(6, dtype='int32')
-        grid_node._v_attrs.numGhostzones = na.zeros(3, dtype='int32')
-        grid_node._v_attrs.dims = grid.ActiveDimensions[::-1].astype('int32')
+        grid_node.attrs['integerOrigin'] = int_origin
+        grid_node.attrs['origin'] = origin
+        grid_node.attrs['ghostzoneFlags'] = na.zeros(6, dtype='int32')
+        grid_node.attrs['numGhostzones'] = na.zeros(3, dtype='int32')
+        grid_node.attrs['dims'] = grid.ActiveDimensions[::-1].astype('int32')
         if not self.always_copy and self.pf.h.data_style == 6 \
            and field in self.pf.h.field_list:
             if grid.hierarchy.data_style == -1: # constructed grid
                 # if we can get conversion in amira we won't need to do this
                 ff = grid[field].astype('float32')
                 ff /= self.pf.conversion_factors.get(field, 1.0)
-                afile.createArray(grid_node, "grid-data", ff.swapaxes(0,2))
+                afile.create_dataset("%s/grid-data" % grid_node, data=ff.swapaxes(0,2))
             else:
                 tfn = os.path.abspath(afile.filename)
                 gfn = os.path.abspath(grid.filename)
                 fpn = os.path.commonprefix([tfn, grid.filename])
                 fn = grid.filename[len(os.path.commonprefix([tfn, grid.filename])):]
-                grid_node._v_attrs.referenceFileName = fn
-                grid_node._v_attrs.referenceDataPath = \
+                grid_node.attrs['referenceFileName'] = fn
+                grid_node.attrs['referenceDataPath'] = \
                     "/Grid%08i/%s" % (grid.id, field)
         else:
             # Export our array
-            afile.createArray(grid_node, "grid-data",
-                grid[field].astype('float32').swapaxes(0,2))
+            afile.create_dataset("%s/grid-data" % grid_node, 
+                                 data = grid[field].astype('float32').swapaxes(0,2))
 
     def _convert_coords(self, val):
         return (val - self.left_edge_offset)*self.mult_factor

Modified: trunk/yt/extensions/lightcone/HaloMask.py
==============================================================================
--- trunk/yt/extensions/lightcone/HaloMask.py	(original)
+++ trunk/yt/extensions/lightcone/HaloMask.py	Wed May  6 16:49:57 2009
@@ -28,7 +28,7 @@
 import yt.lagos as lagos
 import copy
 import numpy as na
-import tables as h5
+import h5py
 
 #### Note: assumption of box width 1.  I'll fix it someday.
 
@@ -50,8 +50,8 @@
     # Write out cube of masks from each slice.
     if cube_file is not None:
         mylog.info("Saving halo mask cube to %s." % cube_file)
-        output = h5.openFile(cube_file,'a')
-        output.createArray("/",'haloMaskCube',na.array(lightConeMask))
+        output = h5py.File(cube_file,'a')
+        output.create_dataset('haloMaskCube',data=na.array(lightConeMask))
         output.close()
 
     # Write out final mask.
@@ -62,8 +62,8 @@
         for mask in lightConeMask:
             finalMask *= mask
 
-        output = h5.openFile(mask_file,'a')
-        output.createArray("/",'haloMask',na.array(finalMask))
+        output = h5py.File(mask_file,'a')
+        output.create_dataset('haloMask',data=na.array(finalMask))
         output.close()
 
     return lightConeMask

Modified: trunk/yt/extensions/lightcone/LightCone.py
==============================================================================
--- trunk/yt/extensions/lightcone/LightCone.py	(original)
+++ trunk/yt/extensions/lightcone/LightCone.py	Wed May  6 16:49:57 2009
@@ -658,7 +658,7 @@
                 else:
                     self.lightConeParameters[param] = t
 
-    def _SaveLightConeStack(self,field=None,weight_field=None,filename=None):
+    def _SaveLightConeStack(self,field=None,weight_field=None,filename=None,over_write=True):
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
@@ -667,7 +667,7 @@
         field_node = "%s_%s" % (field,weight_field)
         weight_field_node = "weight_field_%s" % weight_field
 
-        import tables
+        import h5py
         if (filename is None):
             filename = "%s/%s_data" % (self.lightConeParameters['OutputDir'],self.lightConeParameters['OutputPrefix'])
         if not(filename.endswith('.h5')):
@@ -679,40 +679,48 @@
 
         if self.verbose: mylog.info("Writing light cone data to %s." % filename)
 
-        output = tables.openFile(filename, "a")
+        output = h5py.File(filename, "a")
 
-        try:
-            node_exists = output.isVisibleNode("/%s" % field_node)
-        except tables.exceptions.NoSuchNodeError:
-            node_exists = False
+        node_exists = field_node in output.listnames()
 
         if node_exists:
-            mylog.error("Dataset, %s, already exists in %s, not saving." % (field_node,filename))
+            if over_write:
+                mylog.info("Dataset, %s, already exists, overwriting." % field_node)
+                del output[field_node]
+            else:
+                mylog.info("Dataset, %s, already exists in %s, not saving." % (field_node,filename))
+                write_data = False
         else:
+            write_data = True
+
+        if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
             self.projectionStack = na.array(self.projectionStack)
-            field_dataset = output.createArray("/",field_node,self.projectionStack)
-            field_dataset._v_attrs.redshifts = redshiftList
-            field_dataset._v_attrs.ObserverRedshift = na.float(self.lightConeParameters['ObserverRedshift'])
-            field_dataset._v_attrs.FieldOfViewInArcMinutes = na.float(self.lightConeParameters['FieldOfViewInArcMinutes'])
-            field_dataset._v_attrs.ImageResolutionInArcSeconds = na.float(self.lightConeParameters['ImageResolutionInArcSeconds'])
+            field_dataset = output.create_dataset(field_node,data=self.projectionStack)
+            field_dataset.attrs['redshifts'] = redshiftList
+            field_dataset.attrs['ObserverRedshift'] = na.float(self.lightConeParameters['ObserverRedshift'])
+            field_dataset.attrs['FieldOfViewInArcMinutes'] = na.float(self.lightConeParameters['FieldOfViewInArcMinutes'])
+            field_dataset.attrs['ImageResolutionInArcSeconds'] = na.float(self.lightConeParameters['ImageResolutionInArcSeconds'])
 
         if (len(self.projectionWeightFieldStack) > 0):
-            try:
-                node_exists = output.isVisibleNode("/%s" % weight_field_node)
-            except tables.exceptions.NoSuchNodeError:
-                node_exists = False
-
             if node_exists:
-                mylog.error("Dataset, %s, already exists in %s, not saving." % (weight_field_node,filename))
+                if over_write:
+                    mylog.info("Dataset, %s, already exists, overwriting." % weight_field_node)
+                    del output[field_node]
+                else:
+                    mylog.info("Dataset, %s, already exists in %s, not saving." % (weight_field_node,filename))
+                    write_data = False
             else:
+                write_data = True
+
+            if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projectionWeightFieldStack = na.array(self.projectionWeightFieldStack)
-                weight_field_dataset = output.createArray("/",weight_field_node,self.projectionWeightFieldStack)
-                weight_field_dataset._v_attrs.redshifts = redshiftList
-                weight_field_dataset._v_attrs.ObserverRedshift = na.float(self.lightConeParameters['ObserverRedshift'])
-                weight_field_dataset._v_attrs.FieldOfViewInArcMinutes = na.float(self.lightConeParameters['FieldOfViewInArcMinutes'])
-                weight_field_dataset._v_attrs.ImageResolutionInArcSeconds = na.float(self.lightConeParameters['ImageResolutionInArcSeconds'])
+                weight_field_dataset = output.create_dataset(weight_field_node,data=self.projectionWeightFieldStack)
+                weight_field_dataset.attrs['redshifts'] = redshiftList
+                weight_field_dataset.attrs['ObserverRedshift'] = na.float(self.lightConeParameters['ObserverRedshift'])
+                weight_field_dataset.attrs['FieldOfViewInArcMinutes'] = na.float(self.lightConeParameters['FieldOfViewInArcMinutes'])
+                weight_field_dataset.attrs['ImageResolutionInArcSeconds'] = na.float(self.lightConeParameters['ImageResolutionInArcSeconds'])
 
         output.close()
 

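The new over_write handling above follows a simple check-then-replace pattern in h5py; a rough sketch of the same logic, assuming a file opened in append mode (the helper name and attribute value are illustrative, not the yt API):

    import numpy as np
    import h5py

    def save_stack(filename, node, stack, over_write=True):
        # Illustrative helper: write *stack* under *node*, replacing an
        # existing dataset only when over_write is set.
        output = h5py.File(filename, "a")
        if node in output:           # same test as the listnames() check above
            if not over_write:
                output.close()
                return
            del output[node]         # unlink the old dataset before rewriting
        dset = output.create_dataset(node, data=np.asarray(stack))
        dset.attrs["redshifts"] = np.array([0.5, 0.25, 0.0])   # illustrative attribute
        output.close()
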
Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Wed May  6 16:49:57 2009
@@ -955,7 +955,7 @@
         for fn,g_list in self.hierarchy.cpu_map.items():
             to_read = na.intersect1d(g_list, self.source._grids)
             if len(to_read) == 0: continue
-            fh = tables.openFile(to_read[0].filename,'r')
+            fh = h5py.File(to_read[0].filename,'r')
             for g in to_read:
                 g.handle = fh
                 for field in ensure_list(self.fields):

Modified: trunk/yt/lagos/HaloFinding.py
==============================================================================
--- trunk/yt/lagos/HaloFinding.py	(original)
+++ trunk/yt/lagos/HaloFinding.py	Wed May  6 16:49:57 2009
@@ -134,12 +134,12 @@
     def write_particle_list(self, handle):
         self._processing = True
         gn = "Halo%08i" % (self.id)
-        handle.createGroup("/", gn)
+        handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
                    + ["particle_index"]:
-            handle.createArray("/%s" % gn, field, self[field])
-        n = handle.getNode("/", gn)
+            handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
+        n = handle["/%s" % gn]
         # set attributes on n
         self._processing = False
 
@@ -409,7 +409,7 @@
     @parallel_blocking_call
     def write_particle_lists(self, prefix):
         fn = "%s.h5" % self._get_filename(prefix)
-        f = tables.openFile(fn, "w")
+        f = h5py.File(fn, "w")
         for halo in self._groups:
             if not self._is_mine(halo): continue
             halo.write_particle_list(f)

Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Wed May  6 16:49:57 2009
@@ -125,18 +125,14 @@
             self._data_mode = mode = 'r'
         else:
             self._data_mode = mode = 'a'
-        try:
-            self._data_file = tables.openFile(fn, mode)
-            my_name = self.get_data("/","MyName")
-            if my_name is None and self._data_mode == 'a':
-                self.save_data(str(self.parameter_file), "/", "MyName")
-            else:
-                if str(my_name.read())!=str(self.parameter_file):
-                    self._data_file.close()
-                    self._data_file = None
-        except:
-            self._data_file = None
-            pass
+        self.__create_data_file(fn)
+        self.__data_filename = fn
+        self._data_file = h5py.File(fn, self._data_mode)
+
+    @parallel_root_only
+    def __create_data_file(self, fn):
+        f = h5py.File(fn, 'a')
+        f.close()
 
     def _setup_data_queue(self):
         self.queue = _data_style_funcs[self.data_style][5]()
@@ -153,27 +149,38 @@
             [self.gridLeftEdge[:,0], self.gridRightEdge[:,1], self.gridLeftEdge[:,2]],
             ], dtype='float64')
 
-    def save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
+    def _save_data(self, array, node, name, set_attr=None, force=False, passthrough = False):
         """
         Arbitrary numpy data will be saved to the region in the datafile
         described by *node* and *name*.  If data file does not exist, it throws
         no error and simply does not save.
         """
-        if self._data_file is None or self._data_mode != 'a': return
+
+        if self._data_mode != 'a': return
         try:
-            node_loc = self._data_file.getNode(node)
-            if name in node_loc and force:
+            node_loc = self._data_file[node]
+            if name in node_loc.listnames() and force:
                 mylog.info("Overwriting node %s/%s", node, name)
-                self._data_file.removeNode(node, name, recursive=True)
-            elif name in node_loc and passthrough:
-                return
-        except tables.exceptions.NoSuchNodeError:
+                del self._data_file[node][name]
+            elif name in node_loc.listnames() and passthrough:
+                return        
+        except h5py.h5.ArgsError:
             pass
-        arr = self._data_file.createArray(node, name, array, createparents=True)
+        myGroup = self._data_file['/']
+        for q in node.split('/'):
+            if q: myGroup = myGroup.require_group(q)
+        arr = myGroup.create_dataset(name,data=array)
         if set_attr is not None:
-            for i, j in set_attr.items(): arr.setAttr(i,j)
+            for i, j in set_attr.items(): arr.attrs[i] = j
         self._data_file.flush()
 
+    def _reload_data_file(self, *args, **kwargs):
+        self._data_file.close()
+        del self._data_file
+        self._data_file = h5py.File(self.__data_filename, self._data_mode)
+
+    save_data = parallel_splitter(_save_data, _reload_data_file)
+
     def save_object(self, obj, name):
         s = cPickle.dumps(obj, protocol=-1)
         self.save_data(s, "/Objects", name, force = True)
@@ -182,7 +189,7 @@
         obj = self.get_data("/Objects", name)
         if obj is None:
             return
-        obj = cPickle.loads(obj.read())
+        obj = cPickle.loads(obj.value)
         if iterable(obj) and len(obj) == 2:
             obj = obj[1] # Just the object, not the pf
         if hasattr(obj, '_fix_pickle'): obj._fix_pickle()
@@ -195,11 +202,11 @@
         """
         if self._data_file == None:
             return None
-        try:
-            if node[0] != "/": node = "/%s" % node
-            return self._data_file.getNode(node, name)
-        except tables.exceptions.NoSuchNodeError:
+        if node[0] != "/": node = "/%s" % node
+        full_name = "%s/%s" % (node, name)
+        if full_name not in self._data_file:
             return None
+        return self._data_file["%s/%s" % (node, name)]
 
     def _close_data_file(self):
         if self._data_file:
@@ -859,7 +866,7 @@
             self.__setup_grid_tree()
         else:
             mylog.debug("Grabbing serialized tree data")
-            pTree = cPickle.loads(treeArray.read())
+            pTree = cPickle.loads(treeArray.value)
             self.gridReverseTree = list(self.get_data("/","ReverseTree"))
             self.gridTree = [ [ weakref.proxy(self.grids[i]) for i in pTree[j] ]
                 for j in range(self.num_grids) ]

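The require_group loop in _save_data above stands in for PyTables' createparents=True; a small sketch of the same idea (file name, node path, and array are illustrative):

    import numpy as np
    import h5py

    def save_at_node(afile, node, name, array):
        # Walk down *node*, creating each intermediate group if it is
        # missing (require_group reuses an existing group), then store
        # *array* under *name*.  Sketch only, not the yt method itself.
        group = afile["/"]
        for part in node.split("/"):
            if part:
                group = group.require_group(part)
        return group.create_dataset(name, data=array)

    afile = h5py.File("hierarchy_example.h5", "w")
    save_at_node(afile, "/Objects/sub", "my_array", np.zeros(4))
    afile.close()
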
Modified: trunk/yt/lagos/ParallelTools.py
==============================================================================
--- trunk/yt/lagos/ParallelTools.py	(original)
+++ trunk/yt/lagos/ParallelTools.py	Wed May  6 16:49:57 2009
@@ -145,10 +145,10 @@
 def parallel_blocking_call(func):
     @wraps(func)
     def barrierize(*args, **kwargs):
-        mylog.info("Entering barrier before %s", func.func_name)
+        mylog.debug("Entering barrier before %s", func.func_name)
         MPI.COMM_WORLD.Barrier()
         retval = func(*args, **kwargs)
-        mylog.info("Entering barrier after %s", func.func_name)
+        mylog.debug("Entering barrier after %s", func.func_name)
         MPI.COMM_WORLD.Barrier()
         return retval
     if parallel_capable:
@@ -156,6 +156,19 @@
     else:
         return func
 
+def parallel_splitter(f1, f2):
+    @wraps(f1)
+    def in_order(*args, **kwargs):
+        MPI.COMM_WORLD.Barrier()
+        if MPI.COMM_WORLD.rank == 0:
+            f1(*args, **kwargs)
+        MPI.COMM_WORLD.Barrier()
+        if MPI.COMM_WORLD.rank != 0:
+            f2(*args, **kwargs)
+        MPI.COMM_WORLD.Barrier()
+    if not parallel_capable: return f1
+    return in_order
+
 def parallel_root_only(func):
     @wraps(func)
     def root_only(*args, **kwargs):

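The parallel_splitter added above routes one call to two different functions: the root process does the real work, and every other process runs a catch-up step once the root has finished. A toy illustration of how it is used, with made-up writer/reader functions and the parallel_capable fallback omitted (run under mpiexec):

    from functools import wraps
    from mpi4py import MPI

    def parallel_splitter(f1, f2):
        @wraps(f1)
        def in_order(*args, **kwargs):
            MPI.COMM_WORLD.Barrier()
            if MPI.COMM_WORLD.rank == 0:
                f1(*args, **kwargs)          # root does the actual write
            MPI.COMM_WORLD.Barrier()         # everyone waits for the write
            if MPI.COMM_WORLD.rank != 0:
                f2(*args, **kwargs)          # the rest refresh their view
            MPI.COMM_WORLD.Barrier()
        return in_order

    # Stand-ins for _save_data / _reload_data_file:
    def write_file(tag):
        print("rank 0 writes %s" % tag)

    def reopen_file(tag):
        print("rank %d reopens the data file" % MPI.COMM_WORLD.rank)

    save = parallel_splitter(write_file, reopen_file)
    save("MyDataset")    # e.g. mpiexec -n 4 python splitter_demo.py
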
Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Wed May  6 16:49:57 2009
@@ -37,10 +37,9 @@
 
 import warnings
 try:
-     import tables
-     warnings.simplefilter("ignore", tables.NaturalNameWarning)
+     import h5py
 except ImportError:
-    mylog.warning("No PyTables. Data serialization will fail.")
+    mylog.warning("No h5py. Data serialization will fail.")
 
 from yt.arraytypes import *
 import weakref

Modified: trunk/yt/raven/FixedResolution.py
==============================================================================
--- trunk/yt/raven/FixedResolution.py	(original)
+++ trunk/yt/raven/FixedResolution.py	Wed May  6 16:49:57 2009
@@ -75,11 +75,11 @@
         return distance/dpy
 
     def export_hdf5(self, filename, fields = None):
-        import tables
+        import h5py
         if fields is None: fields = self.data.keys()
-        output = tables.openFile(filename, "a")
+        output = h5py.File(filename, "a")
         for field in fields:
-            output.createArray("/",field,self[field])
+            output.create_dataset(field,data=self[field])
         output.close()
 
     def export_fits(self, filename_prefix, fields = None):


