[yt-svn] commit/yt: 10 new changesets

commits-noreply at bitbucket.org
Wed Mar 26 13:25:38 PDT 2014


10 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/4d7f979e5bd5/
Changeset:   4d7f979e5bd5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 00:48:28
Summary:     Fixing Astropy
Affected #:  1 file

diff -r 94fb98836a21f22f91e4d5524f2f0212e5caf161 -r 4d7f979e5bd58bf2845d705d2c5a9593289eeb90 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -10,14 +10,6 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-    from astropy import log
-    log.disable_exception_logging()
-except ImportError:
-    pass
-
 import stat
 import numpy as np
 import weakref
@@ -41,6 +33,37 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            import astropy.io.fits as pyfits
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            import astropy.wcs as pywcs
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            from astropy import log
+            try:
+                log.disable_exception_logging()
+            except:
+                pass
+            self._log = log
+        return self._log
+
+ap = astropy_imports()
+
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
@@ -145,11 +168,11 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, pyfits.HDUList):
+        if isinstance(filename, ap.pyfits.HDUList):
             self._handle = filename
             fname = filename.filename()
         else:
-            self._handle = pyfits.open(filename)
+            self._handle = ap.pyfits.open(filename)
             fname = filename
         for i, h in enumerate(self._handle):
             if h.is_image and h.data is not None:
@@ -162,7 +185,7 @@
             self.primary_header = primary_header
         self.shape = self._handle[self.first_image].shape
 
-        self.wcs = pywcs.WCS(header=self.primary_header)
+        self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
@@ -246,7 +269,13 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
-            if isinstance(args[0], pyfits.HDUList):
+            ext = args[0].rsplit(".", 1)[-1]
+        except:
+            return False
+        if ext.upper() not in ("FITS", "FTS"):
+            return False
+        try:
+            if isinstance(args[0], ap.pyfits.HDUList):
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
@@ -255,7 +284,7 @@
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = pyfits.open(args[0])
+                fileh = ap.pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()
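
A note on the pattern introduced here: the astropy_imports class defers every astropy import until the corresponding attribute is first read, so importing the FITS frontend no longer requires astropy to be installed. A minimal sketch of the same lazy-import idiom (the class name and the usage below are illustrative, not the exact yt API):

    class lazy_astropy(object):
        _pyfits = None
        @property
        def pyfits(self):
            # astropy is imported on first access, not at module load.
            if self._pyfits is None:
                import astropy.io.fits as pyfits
                self._pyfits = pyfits
            return self._pyfits

    ap = lazy_astropy()
    # Nothing astropy-related has been imported yet; this line triggers it:
    hdus = ap.pyfits.open("example.fits")  # hypothetical file name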


https://bitbucket.org/yt_analysis/yt/commits/716b2e9c66b7/
Changeset:   716b2e9c66b7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 01:05:23
Summary:     Another attempt at fixing astropy
Affected #:  1 file

diff -r 4d7f979e5bd58bf2845d705d2c5a9593289eeb90 -r 716b2e9c66b754398ee1f9f0aeb2bdfe3e7bb66b yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -11,6 +11,7 @@
 #-----------------------------------------------------------------------------
 
 import stat
+import types
 import numpy as np
 import weakref
 import warnings
@@ -268,14 +269,12 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
-            ext = args[0].rsplit(".", 1)[-1]
-        except:
-            return False
-        if ext.upper() not in ("FITS", "FTS"):
-            return False
-        try:
-            if isinstance(args[0], ap.pyfits.HDUList):
+            if args[0].__class__.__name__ == "HDUList":
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
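
Replacing the isinstance check with a comparison against __class__.__name__ is a duck-typing test: _is_valid can now recognize an already-open HDUList without touching ap.pyfits, and therefore without importing astropy for every file yt probes. The idea in isolation (looks_like_hdulist is an illustrative name, not part of yt):

    def looks_like_hdulist(obj):
        # Avoids isinstance(obj, astropy.io.fits.HDUList), which would
        # force an astropy import even for plain-string filenames.
        return obj.__class__.__name__ == "HDUList"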


https://bitbucket.org/yt_analysis/yt/commits/1f08f2a2e939/
Changeset:   1f08f2a2e939
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 01:19:38
Summary:     Another attempt.
Affected #:  1 file

diff -r 716b2e9c66b754398ee1f9f0aeb2bdfe3e7bb66b -r 1f08f2a2e939cf588da8a57fa14f61d0a1fcf539 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -271,6 +271,9 @@
     def _is_valid(self, *args, **kwargs):
         if isinstance(args[0], types.StringTypes):
             ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
             if ext.upper() not in ("FITS", "FTS"):
                 return False
         try:
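
The new branch peels a trailing .gz off the file name before testing the extension, so gzip-compressed files are still recognized as FITS. Tracing the two rsplit calls on a hypothetical file name:

    fn = "m33_hi.fits.gz"                # hypothetical file name
    ext = fn.rsplit(".", 1)[-1]          # -> "gz"
    if ext.upper() == "GZ":
        # Drop the ".gz", then take the next extension -> "fits"
        ext = fn.rsplit(".", 1)[0].rsplit(".", 1)[-1]
    assert ext.upper() in ("FITS", "FTS")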


https://bitbucket.org/yt_analysis/yt/commits/8594b3c70b4e/
Changeset:   8594b3c70b4e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 01:34:13
Summary:     This disables _is_valid for FITS ... for now!
Affected #:  1 file

diff -r 1f08f2a2e939cf588da8a57fa14f61d0a1fcf539 -r 8594b3c70b4e0219eef0838235db7c1736aedbc7 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -269,6 +269,7 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        return False
         if isinstance(args[0], types.StringTypes):
             ext = args[0].rsplit(".", 1)[-1]
             if ext.upper() == "GZ":


https://bitbucket.org/yt_analysis/yt/commits/76764cd65c25/
Changeset:   76764cd65c25
Branch:      yt-3.0
User:        xarthisius
Date:        2014-03-26 10:40:39
Summary:     Try to import mpi4py in the global scope. Disable parallelism upon ImportError. Fixes issue #816
Affected #:  1 file

diff -r 94fb98836a21f22f91e4d5524f2f0212e5caf161 -r 76764cd65c25dd94a678dc12da632dec72dae343 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -13,14 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cPickle
 import cStringIO
 import itertools
 import logging
 import numpy as np
 import sys
+import os
+import traceback
+import types
+from functools import wraps
 
-from yt.funcs import *
+from yt.funcs import \
+    ensure_list, iterable, traceback_writer_hook
 
 from yt.config import ytcfg
 from yt.utilities.definitions import \
@@ -30,8 +34,16 @@
     QuadTree, merge_quadtrees
 from yt.units.yt_array import YTArray
 from yt.units.unit_registry import UnitRegistry
+from yt.utilities.exceptions import YTNoDataInObjectError
+from yt.utilities.logger import ytLogger as mylog
 
 parallel_capable = ytcfg.getboolean("yt", "__parallel")
+if parallel_capable:
+    try:
+        from mpi4py import MPI
+    except ImportError:
+        mylog.info("mpi4py was not found. Disabling parallel computation")
+        parallel_capable = False
 
 dtype_names = dict(
         float32 = "MPI.FLOAT",
@@ -51,7 +63,6 @@
 exe_name = os.path.basename(sys.executable)
 def enable_parallelism():
     global parallel_capable
-    from mpi4py import MPI
     parallel_capable = (MPI.COMM_WORLD.size > 1)
     if not parallel_capable: return False
     mylog.info("Global parallel computation enabled: %s / %s",
@@ -126,7 +137,7 @@
 
     def __iter__(self):
         for obj in self._objs: yield obj
-        
+
 class ParallelObjectIterator(ObjectIterator):
     """
     This takes an object, *pobj*, that implements ParallelAnalysisInterface,
@@ -149,7 +160,7 @@
                                 np.arange(len(self._objs)), self._skip)[self._offset]
             else:
                 self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
-        
+
     def __iter__(self):
         for gid in self.my_obj_ids:
             yield self._objs[gid]
@@ -298,7 +309,7 @@
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
         self.workgroups = []
-    
+
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
             size = len(self.available_ranks)
@@ -309,14 +320,14 @@
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
-        if name is None: 
+        if name is None:
             name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
-    
+
     def free_workgroup(self, workgroup):
         # If you want to actually delete the workgroup you will need to
         # pop it out of the self.workgroups list so you don't have references
@@ -324,7 +335,7 @@
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
-            self.available_ranks.append(i) 
+            self.available_ranks.append(i)
         self.available_ranks.sort()
 
     def free_all(self):
@@ -427,7 +438,7 @@
                                                storage=storage):
             yield my_obj
         return
-    
+
     if not parallel_capable:
         njobs = 1
     my_communicator = communication_system.communicators[-1]
@@ -507,13 +518,13 @@
     Here is a simple example of a ring loop around a set of integers, with a
     custom dtype.
 
-    >>> dt = numpy.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
+    >>> dt = np.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
     >>> def gfunc(o):
-    ...     numpy.random.seed(o)
+    ...     np.random.seed(o)
     ...     rv = np.empty(1000, dtype=dt)
-    ...     rv['x'] = numpy.random.random(1000)
-    ...     rv['y'] = numpy.random.random(1000)
-    ...     rv['z'] = numpy.random.random(1000)
+    ...     rv['x'] = np.random.random(1000)
+    ...     rv['y'] = np.random.random(1000)
+    ...     rv['z'] = np.random.random(1000)
     ...     return rv
     ...
     >>> obj = range(8)
@@ -724,7 +735,7 @@
             return data
         elif datatype == "list" and op == "cat":
             recv_data = self.comm.allgather(data)
-            # Now flatten into a single list, since this 
+            # Now flatten into a single list, since this
             # returns us a list of lists.
             data = []
             while recv_data:
@@ -777,7 +788,7 @@
             if dtype != data.dtype:
                 data = data.astype(dtype)
             temp = data.copy()
-            self.comm.Allreduce([temp,get_mpi_type(dtype)], 
+            self.comm.Allreduce([temp,get_mpi_type(dtype)],
                                      [data,get_mpi_type(dtype)], op)
             return data
         else:
@@ -884,7 +895,7 @@
         self.comm.Send([buf[0], MPI.INT], dest=target)
         self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
-        
+
     def recv_quadtree(self, target, tgd, args):
         sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
@@ -982,7 +993,7 @@
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                recv.append(self.alltoallv_array(send[i,:].copy(),
                                                  total_size, offsets, sizes))
             recv = np.array(recv)
             return recv
@@ -1067,7 +1078,7 @@
 
     def partition_index_2d(self, axis):
         if not self._distributed:
-           return False, self.index.grid_collection(self.center, 
+           return False, self.index.grid_collection(self.center,
                                                         self.index.grids)
 
         xax, yax = x_dict[axis], y_dict[axis]
@@ -1143,7 +1154,7 @@
         LE, RE = left_edge[:], right_edge[:]
         if not self._distributed:
             raise NotImplemented
-            return LE, RE, re
+            return LE, RE #, re
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
@@ -1182,10 +1193,10 @@
 
         cc = MPI.Compute_dims(self.comm.size, 3)
         si = self.comm.size
-        
+
         factors = factor(si)
         xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
-        
+
         # Each entry of cuts is a two element list, that is:
         # [cut dim, number of cuts]
         cuts = []
@@ -1204,7 +1215,7 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-    
+
 class GroupOwnership(ParallelAnalysisInterface):
     def __init__(self, items):
         ParallelAnalysisInterface.__init__(self)
@@ -1229,7 +1240,7 @@
             self.pointer += 1
         if self.item is not old_item:
             self.switch()
-            
+
     def dec(self, n = -1):
         old_item = self.item
         if n == -1: n = self.comm.size
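
The core of this fix for issue #816, shown in isolation: mpi4py is imported once at module scope, guarded so that a missing mpi4py downgrades yt to serial operation instead of raising later (ytcfg and mylog are the imports added in the diff above):

    parallel_capable = ytcfg.getboolean("yt", "__parallel")
    if parallel_capable:
        try:
            from mpi4py import MPI
        except ImportError:
            mylog.info("mpi4py was not found. Disabling parallel computation")
            parallel_capable = False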


https://bitbucket.org/yt_analysis/yt/commits/75033f476463/
Changeset:   75033f476463
Branch:      yt-3.0
User:        xarthisius
Date:        2014-03-26 11:01:47
Summary:     Disable Astropy exception log only if it was enabled in the first place. Shamelessly stolen from astropy codebase itself. Fixes issue #817
Affected #:  1 file

diff -r 76764cd65c25dd94a678dc12da632dec72dae343 -r 75033f476463a9d10310668274f936d75fe17484 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -14,7 +14,8 @@
     import astropy.io.fits as pyfits
     import astropy.wcs as pywcs
     from astropy import log
-    log.disable_exception_logging()
+    if log.exception_logging_enabled():
+        log.disable_exception_logging()
 except ImportError:
     pass
 
@@ -55,11 +56,11 @@
 
     def __repr__(self):
         return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-    
+
 class FITSHierarchy(GridIndex):
 
     grid = FITSGrid
-    
+
     def __init__(self,pf,dataset_type='fits'):
         self.dataset_type = dataset_type
         self.field_indexes = {}
@@ -79,10 +80,10 @@
         for h in self._handle[self.parameter_file.first_image:]:
             if h.is_image:
                 self.field_list.append(("fits", h.name.lower()))
-                        
+
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
-                
+
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
@@ -101,12 +102,12 @@
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
-        
+
         self.grid_levels.flat[:] = 0
         self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
-        
+
     def _populate_grid_objects(self):
         for i in xrange(self.num_grids):
             self.grids[i]._prepare_grid()
@@ -115,7 +116,7 @@
 
     def _setup_derived_fields(self):
         super(FITSHierarchy, self)._setup_derived_fields()
-        [self.parameter_file.conversion_factors[field] 
+        [self.parameter_file.conversion_factors[field]
          for field in self.field_list]
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -125,8 +126,8 @@
             f = self.parameter_file.field_info[field]
             if f._function.func_name == "_TranslationFunc":
                 # Translating an already-converted field
-                self.parameter_file.conversion_factors[field] = 1.0 
-                
+                self.parameter_file.conversion_factors[field] = 1.0
+
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
@@ -135,7 +136,7 @@
     _field_info_class = FITSFieldInfo
     _dataset_type = "fits"
     _handle = None
-    
+
     def __init__(self, filename, dataset_type='fits',
                  primary_header = None,
                  sky_conversion = None,
@@ -155,7 +156,7 @@
             if h.is_image and h.data is not None:
                 self.first_image = i
                 break
-        
+
         if primary_header is None:
             self.primary_header = self._handle[self.first_image].header
         else:
@@ -182,7 +183,7 @@
 
         Dataset.__init__(self, fname, dataset_type)
         self.storage_filename = storage_filename
-            
+
         self.refine_by = 2
         # For plotting to APLpy
         self.hdu_list = self._handle
@@ -201,7 +202,7 @@
         self.length_unit = self.quan(length_factor,length_unit)
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")        
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -219,14 +220,14 @@
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])
-            
+
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5
-            
+
         # Get the simulation time
         try:
             self.current_time = self.parameters["time"]
@@ -234,7 +235,7 @@
             mylog.warning("Cannot find time")
             self.current_time = 0.0
             pass
-        
+
         # For now we'll ignore these
         self.periodicity = (False,)*3
         self.current_redshift = self.omega_lambda = self.omega_matter = \
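
The guard matters because astropy's log.disable_exception_logging() can raise when exception logging is not currently enabled (the behavior issue #817 tripped over); checking first makes the call safe regardless of the user's astropy configuration. The pattern in isolation:

    from astropy import log
    # Only disable exception logging if astropy actually has it enabled;
    # an unconditional disable call can raise in some configurations.
    if log.exception_logging_enabled():
        log.disable_exception_logging()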


https://bitbucket.org/yt_analysis/yt/commits/64060c4f07bd/
Changeset:   64060c4f07bd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 15:28:38
Summary:     Merging Kacper's PR with mine.
Affected #:  1 file

diff -r 75033f476463a9d10310668274f936d75fe17484 -r 64060c4f07bd32ffd103df3a2f8f8d373b494eb9 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -10,16 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-    from astropy import log
-    if log.exception_logging_enabled():
-        log.disable_exception_logging()
-except ImportError:
-    pass
-
 import stat
+import types
 import numpy as np
 import weakref
 import warnings
@@ -42,6 +34,35 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            import astropy.io.fits as pyfits
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            import astropy.wcs as pywcs
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            from astropy import log
+            if log.exception_logging_enabled():
+                log.disable_exception_logging()
+            self._log = log
+        return self._log
+
+ap = astropy_imports()
+
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
@@ -146,11 +167,11 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, pyfits.HDUList):
+        if isinstance(filename, ap.pyfits.HDUList):
             self._handle = filename
             fname = filename.filename()
         else:
-            self._handle = pyfits.open(filename)
+            self._handle = ap.pyfits.open(filename)
             fname = filename
         for i, h in enumerate(self._handle):
             if h.is_image and h.data is not None:
@@ -163,7 +184,7 @@
             self.primary_header = primary_header
         self.shape = self._handle[self.first_image].shape
 
-        self.wcs = pywcs.WCS(header=self.primary_header)
+        self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
@@ -246,8 +267,16 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        return False
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
-            if isinstance(args[0], pyfits.HDUList):
+            if args[0].__class__.__name__ == "HDUList":
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
@@ -256,7 +285,7 @@
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = pyfits.open(args[0])
+                fileh = ap.pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()


https://bitbucket.org/yt_analysis/yt/commits/f488edc6c4b0/
Changeset:   f488edc6c4b0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 16:19:12
Summary:     Fixing up parallelism, and removing module-level calls.
Affected #:  1 file

diff -r 64060c4f07bd32ffd103df3a2f8f8d373b494eb9 -r f488edc6c4b09e7055e82cc7f99e6240e259bf84 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -37,13 +37,10 @@
 from yt.utilities.exceptions import YTNoDataInObjectError
 from yt.utilities.logger import ytLogger as mylog
 
-parallel_capable = ytcfg.getboolean("yt", "__parallel")
-if parallel_capable:
-    try:
-        from mpi4py import MPI
-    except ImportError:
-        mylog.info("mpi4py was not found. Disabling parallel computation")
-        parallel_capable = False
+# We default to *no* parallelism unless it gets turned on, in which case this
+# will be changed.
+MPI = None
+parallel_capable = False
 
 dtype_names = dict(
         float32 = "MPI.FLOAT",
@@ -60,13 +57,21 @@
 
 # Set up translation table and import things
 
-exe_name = os.path.basename(sys.executable)
 def enable_parallelism():
-    global parallel_capable
+    global parallel_capable, MPI
+    try:
+        from mpi4py import MPI as _MPI
+    except ImportError:
+        mylog.info("mpi4py was not found. Disabling parallel computation")
+        parallel_capable = False
+        return
+    MPI = _MPI
+    exe_name = os.path.basename(sys.executable)
     parallel_capable = (MPI.COMM_WORLD.size > 1)
     if not parallel_capable: return False
     mylog.info("Global parallel computation enabled: %s / %s",
                MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+    communication_system.push(MPI.COMM_WORLD)
     ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
     ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
     ytcfg["yt","__parallel"] = "True"
@@ -594,10 +599,7 @@
     communicators = []
 
     def __init__(self):
-        if parallel_capable:
-            self.communicators.append(Communicator(MPI.COMM_WORLD))
-        else:
-            self.communicators.append(Communicator(None))
+        self.communicators.append(Communicator(None))
 
     def push(self, new_comm):
         if not isinstance(new_comm, Communicator):
@@ -1024,9 +1026,6 @@
                 break
 
 communication_system = CommunicationSystem()
-if parallel_capable:
-    ranks = np.arange(MPI.COMM_WORLD.size)
-    communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
     comm = None
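
This changeset inverts the earlier module-scope approach: MPI is now a module-level global that stays None until enable_parallelism() is called, which imports mpi4py, flips parallel_capable, and pushes MPI.COMM_WORLD onto the communication system. The deferred-import-via-global skeleton, reduced to its essentials from the diff above:

    MPI = None                 # filled in only on demand
    parallel_capable = False   # serial by default

    def enable_parallelism():
        global parallel_capable, MPI
        try:
            from mpi4py import MPI as _MPI
        except ImportError:
            return False       # stay serial when mpi4py is absent
        MPI = _MPI
        parallel_capable = MPI.COMM_WORLD.size > 1
        return parallel_capable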


https://bitbucket.org/yt_analysis/yt/commits/1d40ca181e07/
Changeset:   1d40ca181e07
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-26 20:29:11
Summary:     Ensuring self.log gets accessed.
Affected #:  1 file

diff -r f488edc6c4b09e7055e82cc7f99e6240e259bf84 -r 1d40ca181e07e46e323ed3f1fde2c346b44a1793 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -40,6 +40,7 @@
     def pyfits(self):
         if self._pyfits is None:
             import astropy.io.fits as pyfits
+            self.log
             self._pyfits = pyfits
         return self._pyfits
 
@@ -48,6 +49,7 @@
     def pywcs(self):
         if self._pywcs is None:
             import astropy.wcs as pywcs
+            self.log
             self._pywcs = pywcs
         return self._pywcs
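
The bare self.log expression is not dead code: reading the log property has the side effect of configuring (and, if needed, disabling) astropy's exception logging before pyfits or pywcs is handed back to the caller. The getter, annotated:

    @property
    def pyfits(self):
        if self._pyfits is None:
            import astropy.io.fits as pyfits
            self.log   # read for its side effect: sets up astropy logging
            self._pyfits = pyfits
        return self._pyfits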
 


https://bitbucket.org/yt_analysis/yt/commits/3a11f1dfd969/
Changeset:   3a11f1dfd969
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-03-26 21:25:31
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #766)

Fixing Astropy
Affected #:  2 files

diff -r 986e420bbe8bb373ebc2db5699198f5ae087324c -r 3a11f1dfd9699f172b1ff73697dacd11302fdd2a yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -10,15 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-    from astropy import log
-    log.disable_exception_logging()
-except ImportError:
-    pass
-
 import stat
+import types
 import numpy as np
 import weakref
 import warnings
@@ -41,6 +34,37 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            import astropy.io.fits as pyfits
+            self.log
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            import astropy.wcs as pywcs
+            self.log
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            from astropy import log
+            if log.exception_logging_enabled():
+                log.disable_exception_logging()
+            self._log = log
+        return self._log
+
+ap = astropy_imports()
+
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
@@ -55,11 +79,11 @@
 
     def __repr__(self):
         return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-    
+
 class FITSHierarchy(GridIndex):
 
     grid = FITSGrid
-    
+
     def __init__(self,pf,dataset_type='fits'):
         self.dataset_type = dataset_type
         self.field_indexes = {}
@@ -79,10 +103,10 @@
         for h in self._handle[self.parameter_file.first_image:]:
             if h.is_image:
                 self.field_list.append(("fits", h.name.lower()))
-                        
+
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
-                
+
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
@@ -101,12 +125,12 @@
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
-        
+
         self.grid_levels.flat[:] = 0
         self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
-        
+
     def _populate_grid_objects(self):
         for i in xrange(self.num_grids):
             self.grids[i]._prepare_grid()
@@ -115,7 +139,7 @@
 
     def _setup_derived_fields(self):
         super(FITSHierarchy, self)._setup_derived_fields()
-        [self.parameter_file.conversion_factors[field] 
+        [self.parameter_file.conversion_factors[field]
          for field in self.field_list]
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -125,8 +149,8 @@
             f = self.parameter_file.field_info[field]
             if f._function.func_name == "_TranslationFunc":
                 # Translating an already-converted field
-                self.parameter_file.conversion_factors[field] = 1.0 
-                
+                self.parameter_file.conversion_factors[field] = 1.0
+
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
@@ -135,7 +159,7 @@
     _field_info_class = FITSFieldInfo
     _dataset_type = "fits"
     _handle = None
-    
+
     def __init__(self, filename, dataset_type='fits',
                  primary_header = None,
                  sky_conversion = None,
@@ -145,24 +169,24 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, pyfits.HDUList):
+        if isinstance(filename, ap.pyfits.HDUList):
             self._handle = filename
             fname = filename.filename()
         else:
-            self._handle = pyfits.open(filename)
+            self._handle = ap.pyfits.open(filename)
             fname = filename
         for i, h in enumerate(self._handle):
             if h.is_image and h.data is not None:
                 self.first_image = i
                 break
-        
+
         if primary_header is None:
             self.primary_header = self._handle[self.first_image].header
         else:
             self.primary_header = primary_header
         self.shape = self._handle[self.first_image].shape
 
-        self.wcs = pywcs.WCS(header=self.primary_header)
+        self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
@@ -182,7 +206,7 @@
 
         Dataset.__init__(self, fname, dataset_type)
         self.storage_filename = storage_filename
-            
+
         self.refine_by = 2
         # For plotting to APLpy
         self.hdu_list = self._handle
@@ -201,7 +225,7 @@
         self.length_unit = self.quan(length_factor,length_unit)
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")        
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -219,14 +243,14 @@
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])
-            
+
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5
-            
+
         # Get the simulation time
         try:
             self.current_time = self.parameters["time"]
@@ -234,7 +258,7 @@
             mylog.warning("Cannot find time")
             self.current_time = 0.0
             pass
-        
+
         # For now we'll ignore these
         self.periodicity = (False,)*3
         self.current_redshift = self.omega_lambda = self.omega_matter = \
@@ -245,8 +269,16 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        return False
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
-            if isinstance(args[0], pyfits.HDUList):
+            if args[0].__class__.__name__ == "HDUList":
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
@@ -255,7 +287,7 @@
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = pyfits.open(args[0])
+                fileh = ap.pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()

diff -r 986e420bbe8bb373ebc2db5699198f5ae087324c -r 3a11f1dfd9699f172b1ff73697dacd11302fdd2a yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -13,14 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cPickle
 import cStringIO
 import itertools
 import logging
 import numpy as np
 import sys
+import os
+import traceback
+import types
+from functools import wraps
 
-from yt.funcs import *
+from yt.funcs import \
+    ensure_list, iterable, traceback_writer_hook
 
 from yt.config import ytcfg
 from yt.utilities.definitions import \
@@ -30,8 +34,13 @@
     QuadTree, merge_quadtrees
 from yt.units.yt_array import YTArray
 from yt.units.unit_registry import UnitRegistry
+from yt.utilities.exceptions import YTNoDataInObjectError
+from yt.utilities.logger import ytLogger as mylog
 
-parallel_capable = ytcfg.getboolean("yt", "__parallel")
+# We default to *no* parallelism unless it gets turned on, in which case this
+# will be changed.
+MPI = None
+parallel_capable = False
 
 dtype_names = dict(
         float32 = "MPI.FLOAT",
@@ -48,14 +57,21 @@
 
 # Set up translation table and import things
 
-exe_name = os.path.basename(sys.executable)
 def enable_parallelism():
-    global parallel_capable
-    from mpi4py import MPI
+    global parallel_capable, MPI
+    try:
+        from mpi4py import MPI as _MPI
+    except ImportError:
+        mylog.info("mpi4py was not found. Disabling parallel computation")
+        parallel_capable = False
+        return
+    MPI = _MPI
+    exe_name = os.path.basename(sys.executable)
     parallel_capable = (MPI.COMM_WORLD.size > 1)
     if not parallel_capable: return False
     mylog.info("Global parallel computation enabled: %s / %s",
                MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+    communication_system.push(MPI.COMM_WORLD)
     ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
     ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
     ytcfg["yt","__parallel"] = "True"
@@ -126,7 +142,7 @@
 
     def __iter__(self):
         for obj in self._objs: yield obj
-        
+
 class ParallelObjectIterator(ObjectIterator):
     """
     This takes an object, *pobj*, that implements ParallelAnalysisInterface,
@@ -149,7 +165,7 @@
                                 np.arange(len(self._objs)), self._skip)[self._offset]
             else:
                 self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
-        
+
     def __iter__(self):
         for gid in self.my_obj_ids:
             yield self._objs[gid]
@@ -298,7 +314,7 @@
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
         self.workgroups = []
-    
+
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
             size = len(self.available_ranks)
@@ -309,14 +325,14 @@
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
-        if name is None: 
+        if name is None:
             name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
-    
+
     def free_workgroup(self, workgroup):
         # If you want to actually delete the workgroup you will need to
         # pop it out of the self.workgroups list so you don't have references
@@ -324,7 +340,7 @@
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
-            self.available_ranks.append(i) 
+            self.available_ranks.append(i)
         self.available_ranks.sort()
 
     def free_all(self):
@@ -427,7 +443,7 @@
                                                storage=storage):
             yield my_obj
         return
-    
+
     if not parallel_capable:
         njobs = 1
     my_communicator = communication_system.communicators[-1]
@@ -507,13 +523,13 @@
     Here is a simple example of a ring loop around a set of integers, with a
     custom dtype.
 
-    >>> dt = numpy.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
+    >>> dt = np.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
     >>> def gfunc(o):
-    ...     numpy.random.seed(o)
+    ...     np.random.seed(o)
     ...     rv = np.empty(1000, dtype=dt)
-    ...     rv['x'] = numpy.random.random(1000)
-    ...     rv['y'] = numpy.random.random(1000)
-    ...     rv['z'] = numpy.random.random(1000)
+    ...     rv['x'] = np.random.random(1000)
+    ...     rv['y'] = np.random.random(1000)
+    ...     rv['z'] = np.random.random(1000)
     ...     return rv
     ...
     >>> obj = range(8)
@@ -583,10 +599,7 @@
     communicators = []
 
     def __init__(self):
-        if parallel_capable:
-            self.communicators.append(Communicator(MPI.COMM_WORLD))
-        else:
-            self.communicators.append(Communicator(None))
+        self.communicators.append(Communicator(None))
 
     def push(self, new_comm):
         if not isinstance(new_comm, Communicator):
@@ -724,7 +737,7 @@
             return data
         elif datatype == "list" and op == "cat":
             recv_data = self.comm.allgather(data)
-            # Now flatten into a single list, since this 
+            # Now flatten into a single list, since this
             # returns us a list of lists.
             data = []
             while recv_data:
@@ -777,7 +790,7 @@
             if dtype != data.dtype:
                 data = data.astype(dtype)
             temp = data.copy()
-            self.comm.Allreduce([temp,get_mpi_type(dtype)], 
+            self.comm.Allreduce([temp,get_mpi_type(dtype)],
                                      [data,get_mpi_type(dtype)], op)
             return data
         else:
@@ -884,7 +897,7 @@
         self.comm.Send([buf[0], MPI.INT], dest=target)
         self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
-        
+
     def recv_quadtree(self, target, tgd, args):
         sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
@@ -982,7 +995,7 @@
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                recv.append(self.alltoallv_array(send[i,:].copy(),
                                                  total_size, offsets, sizes))
             recv = np.array(recv)
             return recv
@@ -1013,9 +1026,6 @@
                 break
 
 communication_system = CommunicationSystem()
-if parallel_capable:
-    ranks = np.arange(MPI.COMM_WORLD.size)
-    communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
     comm = None
@@ -1067,7 +1077,7 @@
 
     def partition_index_2d(self, axis):
         if not self._distributed:
-           return False, self.index.grid_collection(self.center, 
+           return False, self.index.grid_collection(self.center,
                                                         self.index.grids)
 
         xax, yax = x_dict[axis], y_dict[axis]
@@ -1143,7 +1153,7 @@
         LE, RE = left_edge[:], right_edge[:]
         if not self._distributed:
             raise NotImplemented
-            return LE, RE, re
+            return LE, RE #, re
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
@@ -1182,10 +1192,10 @@
 
         cc = MPI.Compute_dims(self.comm.size, 3)
         si = self.comm.size
-        
+
         factors = factor(si)
         xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
-        
+
         # Each entry of cuts is a two element list, that is:
         # [cut dim, number of cuts]
         cuts = []
@@ -1204,7 +1214,7 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-    
+
 class GroupOwnership(ParallelAnalysisInterface):
     def __init__(self, items):
         ParallelAnalysisInterface.__init__(self)
@@ -1229,7 +1239,7 @@
             self.pointer += 1
         if self.item is not old_item:
             self.switch()
-            
+
     def dec(self, n = -1):
         old_item = self.item
         if n == -1: n = self.comm.size

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for the address
this email was sent to.


