[yt-svn] commit/yt: 2 new changesets

Bitbucket <commits-noreply@bitbucket.org>
Thu Jan 10 07:20:36 PST 2013


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/71056b6a6ce1/
changeset:   71056b6a6ce1
branch:      yt
user:        xarthisius
date:        2013-01-10 16:14:03
summary:     [chombo] close all hdf5 file handles once they're no longer necessary
affected #:  1 file

diff -r ad241a5772fc214f75370fae5ba3da7f457e13c0 -r 71056b6a6ce1bf89c361a8f3874f22579d65aea0 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,7 @@
      pluto2enzoDict, \
      yt2plutoFieldsDict, \
      parameterDict \
-     
+
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -74,7 +74,7 @@
         """
         Return the integer starting index for each dimension at the current
         level.
-        
+
         """
         if self.start_index != None:
             return self.start_index
@@ -96,7 +96,7 @@
 class ChomboHierarchy(AMRHierarchy):
 
     grid = ChomboGrid
-    
+
     def __init__(self,pf,data_style='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
@@ -127,7 +127,7 @@
                 particle_position_z = float(line.split(' ')[3])
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py                                                                                                             
+                # copied from object_finding_mixin.py
                 mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
@@ -146,7 +146,7 @@
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
         self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
-    
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
@@ -156,10 +156,10 @@
         self.num_grids = 0
         for lev in self._levels:
             self.num_grids += self._fhandle[lev]['Processors'].len()
-        
+
     def _parse_hierarchy(self):
         f = self._fhandle # shortcut
-        
+
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
         levels = f.keys()[1:]
@@ -211,7 +211,7 @@
     _hierarchy_class = ChomboHierarchy
     _fieldinfo_fallback = ChomboFieldInfo
     _fieldinfo_known = KnownChomboFields
-    
+
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
         fileh = h5py.File(filename,'r')
@@ -220,7 +220,8 @@
         self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-        
+        fileh.close()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -274,6 +275,7 @@
             self.dimensionality = 3
             fileh = h5py.File(self.parameter_filename,'r')
             self.refine_by = fileh['/level_0'].attrs['ref_ratio']
+            fileh.close()
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -288,7 +290,7 @@
         lines = open(self.ini_filename).readlines()
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
-            try: 
+            try:
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
@@ -316,13 +318,14 @@
         RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                  
+
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
         L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        fileh.close()
         return R_index - L_index
- 
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
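
For reference, here is a minimal standalone sketch of the handle-closing
pattern this changeset applies, built from the same '/level_0' attributes
that appear in the diff. The helper name read_domain_dimensions and the
free-function form are illustrative only; in yt the equivalent logic lives
in methods such as __calc_domain_dimensions shown above.

    import numpy as np
    import h5py

    def read_domain_dimensions(parameter_filename):
        # Open the plotfile, copy the attribute out, and close the handle
        # as soon as it is no longer needed -- the pattern this patch enforces.
        fileh = h5py.File(parameter_filename, 'r')
        prob_domain = np.array(list(fileh['/level_0'].attrs['prob_domain']))
        fileh.close()
        L_index = prob_domain[0:3]
        R_index = prob_domain[3:] + 1
        return R_index - L_index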


https://bitbucket.org/yt_analysis/yt/commits/213e04419915/
changeset:   213e04419915
branch:      yt
user:        MatthewTurk
date:        2013-01-10 16:20:32
summary:     Merged in xarthisius/yt (pull request #395: [chombo] close all hdf5 file handles once they're no longer necessary)
affected #:  1 file

diff -r c5e0f2a0ac7a143690f17f9cf48e2ebc41c683c8 -r 213e0441991554ca0392e7e8a7905114cf06842b yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -42,7 +42,7 @@
      pluto2enzoDict, \
      yt2plutoFieldsDict, \
      parameterDict \
-     
+
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
      AMRGridPatch
@@ -74,7 +74,7 @@
         """
         Return the integer starting index for each dimension at the current
         level.
-        
+
         """
         if self.start_index != None:
             return self.start_index
@@ -96,7 +96,7 @@
 class ChomboHierarchy(AMRHierarchy):
 
     grid = ChomboGrid
-    
+
     def __init__(self,pf,data_style='chombo_hdf5'):
         self.domain_left_edge = pf.domain_left_edge
         self.domain_right_edge = pf.domain_right_edge
@@ -127,7 +127,7 @@
                 particle_position_z = float(line.split(' ')[3])
                 coord = [particle_position_x, particle_position_y, particle_position_z]
                 # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py                                                                                                             
+                # copied from object_finding_mixin.py
                 mask=np.ones(self.num_grids)
                 for i in xrange(len(coord)):
                     np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
@@ -146,7 +146,7 @@
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
         self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
-    
+
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
@@ -156,10 +156,10 @@
         self.num_grids = 0
         for lev in self._levels:
             self.num_grids += self._fhandle[lev]['Processors'].len()
-        
+
     def _parse_hierarchy(self):
         f = self._fhandle # shortcut
-        
+
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
         levels = f.keys()[1:]
@@ -211,7 +211,7 @@
     _hierarchy_class = ChomboHierarchy
     _fieldinfo_fallback = ChomboFieldInfo
     _fieldinfo_known = KnownChomboFields
-    
+
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
         fileh = h5py.File(filename,'r')
@@ -220,7 +220,8 @@
         self.fullplotdir = os.path.abspath(filename)
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-        
+        fileh.close()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file
@@ -274,6 +275,7 @@
             self.dimensionality = 3
             fileh = h5py.File(self.parameter_filename,'r')
             self.refine_by = fileh['/level_0'].attrs['ref_ratio']
+            fileh.close()
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -288,7 +290,7 @@
         lines = open(self.ini_filename).readlines()
         # read the file line by line, storing important parameters
         for lineI, line in enumerate(lines):
-            try: 
+            try:
                 param, sep, vals = map(rstrip,line.partition(' '))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
@@ -316,13 +318,14 @@
         RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                  
+
     def __calc_domain_dimensions(self):
         fileh = h5py.File(self.parameter_filename,'r')
         L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
         R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        fileh.close()
         return R_index - L_index
- 
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
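
As a side note, the same guarantee can also be obtained with h5py's context
manager, which closes the handle even if an exception is raised before an
explicit close() call is reached. This is only a sketch of an alternative
idiom, not what the patch itself does; the function name read_refine_by is
illustrative, while the 'ref_ratio' attribute is taken from the diff above.

    import h5py

    def read_refine_by(parameter_filename):
        # The with-statement closes the file handle on exit, including when
        # the attribute lookup fails, so no handle is leaked.
        with h5py.File(parameter_filename, 'r') as fileh:
            return fileh['/level_0'].attrs['ref_ratio']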

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because you have the commit notification service enabled and are
addressed as a recipient of this email.
