[yt-svn] commit/yt: 2 new changesets

From: Bitbucket <commits-noreply at bitbucket.org>
Date: Fri May 25 11:49:36 PDT 2012


2 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/319fcd5d44b7/
changeset:   319fcd5d44b7
branch:      yt
user:        ngoldbaum
date:        2012-05-25 20:44:30
summary:     Upload_image now points directly to the image rather than an imgur page.  This
should avoid embarrassing images appearing with the uploaded image on the imgur
page.
affected #:  1 file

diff -r 3442625bb24f66b1cd15273f7feb708342bad20a -r 319fcd5d44b73635de8880ca941ed21a0aa76711 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1430,7 +1430,7 @@
         if 'upload' in rv and 'links' in rv['upload']:
             print
             print "Image successfully uploaded!  You can find it at:"
-            print "    %s" % (rv['upload']['links']['imgur_page'])
+            print "    %s" % (rv['upload']['links']['original'])
             print
             print "If you'd like to delete it, visit this page:"
             print "    %s" % (rv['upload']['links']['delete_page'])



https://bitbucket.org/yt_analysis/yt/changeset/eef0ecf484eb/
changeset:   eef0ecf484eb
branch:      yt
user:        ngoldbaum
date:        2012-05-25 20:45:27
summary:     Merging in yt_analysis/yt
affected #:  5 files

diff -r 319fcd5d44b73635de8880ca941ed21a0aa76711 -r eef0ecf484eb4669591a6e3ff5b3950e19981767 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -240,6 +240,8 @@
             pass
         elif isinstance(center, (types.ListType, types.TupleType, na.ndarray)):
             center = na.array(center)
+        elif center in ("c", "center"):
+            center = self.pf.domain_center
         elif center == ("max"): # is this dangerous for race conditions?
             center = self.pf.h.find_max("Density")[1]
         elif center.startswith("max_"):
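
For context, the new branch in data_containers.py lets the center keywords
"c" and "center" resolve to the domain center.  A minimal sketch of the
dispatch, assuming a parameter-file object exposing domain_center and a
hierarchy with find_max(); the helper name below is illustrative, not yt's.

    import numpy as na  # yt aliased numpy as `na` at this point in its history

    def _sanitize_center(center, pf):
        # Order matters: array-like input is handled before the string tests,
        # mirroring the branch structure in the diff above.
        if isinstance(center, (list, tuple, na.ndarray)):
            return na.array(center)
        elif center in ("c", "center"):
            return pf.domain_center          # new shortcut added here
        elif center == "max":
            return pf.h.find_max("Density")[1]
        return center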


diff -r 319fcd5d44b73635de8880ca941ed21a0aa76711 -r eef0ecf484eb4669591a6e3ff5b3950e19981767 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -287,6 +287,7 @@
         uniquedims[i] = <np.float64_t *> \
                 alloca(2*n_grids * sizeof(np.float64_t))
     my_max = 0
+    best_dim = -1
     for dim in range(3):
         n_unique = 0
         uniques = uniquedims[dim]
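
The one-line Cython change initializes best_dim to -1 before the loop over
dimensions, so kdtree_get_choices can signal "no unique split found" rather
than leaving best_dim undefined; the amr_kdtree.py change below tests for
exactly this sentinel.  A pure-Python sketch of the pattern (the real routine
works on grid edge coordinates, not precomputed counts):

    def pick_split_dim(unique_edge_counts):
        # best_dim stays -1 unless some dimension offers at least one unique
        # candidate split; callers can then test for the sentinel value.
        my_max = 0
        best_dim = -1
        for dim in range(3):
            n_unique = unique_edge_counts[dim]
            if n_unique > my_max:
                my_max = n_unique
                best_dim = dim
        return best_dim

    pick_split_dim([0, 0, 0])  # -> -1: fully overlapping grids, no split
    pick_split_dim([2, 4, 3])  # -> 1: dimension with the most unique edges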


diff -r 319fcd5d44b73635de8880ca941ed21a0aa76711 -r eef0ecf484eb4669591a6e3ff5b3950e19981767 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -1012,7 +1012,7 @@
                     # This node belongs to someone else, move along
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
-                
+
             # If we are down to one grid, we are either in it or the parent grid
             if len(current_node.grids) == 1:
                 thisgrid = current_node.grids[0]
@@ -1031,25 +1031,27 @@
                         if len(children) > 0:
                             current_node.grids = self.pf.hierarchy.grids[na.array(children,copy=False)]
                             current_node.parent_grid = thisgrid
-                            # print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
+                            #print 'My single grid covers the rest of the volume, and I have children, about to iterate on them'
                             del children
                             continue
 
                     # Else make a leaf node (brick container)
+                    #print 'My single grid covers the rest of the volume, and I have no children', thisgrid
                     set_leaf(current_node, thisgrid, current_node.l_corner, current_node.r_corner)
-                    # print 'My single grid covers the rest of the volume, and I have no children'
                     current_node, previous_node = self.step_depth(current_node, previous_node)
                     continue
 
             # If we don't have any grids, this volume belongs to the parent        
             if len(current_node.grids) == 0:
+                #print 'This volume does not have a child grid, so it belongs to my parent!'
                 set_leaf(current_node, current_node.parent_grid, current_node.l_corner, current_node.r_corner)
-                # print 'This volume does not have a child grid, so it belongs to my parent!'
                 current_node, previous_node = self.step_depth(current_node, previous_node)
                 continue
 
             # If we've made it this far, time to build a dividing node
-            self._build_dividing_node(current_node)
+            # print 'Building dividing node'
+            # Continue if building failed
+            if self._build_dividing_node(current_node): continue
 
             # Step to the nest node in a depth-first traversal.
             current_node, previous_node = self.step_depth(current_node, previous_node)
@@ -1058,10 +1060,10 @@
         '''
         Given a node, finds all the choices for the next dividing plane.  
         '''
-        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         # For some reason doing dim 0 separately is slightly faster.
         # This could be rewritten to all be in the loop below.
 
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
         best_dim, split, less_ids, greater_ids = \
             kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
         return data[:,:,best_dim], best_dim, split, less_ids, greater_ids
@@ -1071,8 +1073,19 @@
         Makes the current node a dividing node, and initializes the
         left and right children.
         '''
-        
-        data,best_dim,split,less_ids,greater_ids = self._get_choices(current_node)
+
+        data = na.array([(child.LeftEdge, child.RightEdge) for child in current_node.grids],copy=False)
+        best_dim, split, less_ids, greater_ids = \
+            kdtree_get_choices(data, current_node.l_corner, current_node.r_corner)
+
+        del data
+
+        # Here we break out if no unique grids were found. In this case, there
+        # are likely overlapping grids, and we assume that the first grid takes
+        # precedence.  This is fragile.
+        if best_dim == -1:
+            current_node.grids = [current_node.grids[0]]
+            return 1
 
         current_node.split_ax = best_dim
         current_node.split_pos = split
@@ -1080,7 +1093,7 @@
         #greater_ids0 = (split < data[:,1])
         #assert(na.all(less_ids0 == less_ids))
         #assert(na.all(greater_ids0 == greater_ids))
-        
+
         current_node.left_child = MasterNode(my_id=_lchild_id(current_node.id),
                                              parent=current_node,
                                              parent_grid=current_node.parent_grid,
@@ -1099,7 +1112,9 @@
         # build to work.  The other deletions are just to save memory.
         del current_node.grids, current_node.parent_grid, current_node.brick,\
             current_node.li, current_node.ri, current_node.dims
-        
+
+        return 0
+
     def traverse(self, back_center, front_center, image):
         r"""Traverses the kd-Tree, casting the partitioned grids from back to
             front.
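
In amr_kdtree.py, _build_dividing_node now returns a status code: 1 when no
unique split plane exists (the best_dim == -1 sentinel above), in which case
the node is collapsed onto its first grid and the build loop's `continue`
lets the next pass treat it as a single-grid leaf; 0 on success.  A
self-contained sketch of that convention, using a stand-in node class rather
than yt's MasterNode:

    class _Node(object):
        """Minimal stand-in for a kd-tree node, for illustration only."""
        def __init__(self, grids):
            self.grids = grids
            self.split_ax = None
            self.split_pos = None

    def build_dividing_node(node, best_dim, split):
        # Mirrors the new early-out: best_dim == -1 means overlapping grids
        # with no unique split plane, so keep the first grid and report
        # failure; otherwise record the split and report success.
        if best_dim == -1:
            node.grids = [node.grids[0]]
            return 1
        node.split_ax = best_dim
        node.split_pos = split
        # ... left/right children would be created here ...
        return 0

    node = _Node(grids=["grid_a", "grid_b"])
    if build_dividing_node(node, best_dim=-1, split=None):
        pass  # caller does `continue`; node.grids is now ["grid_a"]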


diff -r 319fcd5d44b73635de8880ca941ed21a0aa76711 -r eef0ecf484eb4669591a6e3ff5b3950e19981767 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -82,11 +82,15 @@
         if cls.npfs > 1:
             self(args)
         else:
-            if len(getattr(args, "pf", [])) > 1:
+            pf_args = getattr(args, "pf", [])
+            if len(pf_args) > 1:
                 pfs = args.pf
                 for pf in pfs:
                     args.pf = pf
                     self(args)
+            elif len(pf_args) == 0:
+                pfs = []
+                self(args)
             else:
                 args.pf = getattr(args, 'pf', [None])[0]
                 self(args)
@@ -105,6 +109,8 @@
 _common_options = dict(
     pf      = dict(short="pf", action=GetParameterFiles,
                    nargs="+", help="Parameter files to run on"),
+    opf     = dict(action=GetParameterFiles, dest="pf",
+                   nargs="*", help="(Optional) Parameter files to run on"),
     axis    = dict(short="-a", long="--axis",
                    action="store", type=int,
                    dest="axis", default=4,
@@ -1269,7 +1275,8 @@
                  help="At startup, find all *.hierarchy files in the CWD"),
             dict(short="-d", long="--debug", action="store_true",
                  default = False, dest="debug",
-                 help="Add a debugging mode for cell execution")
+                 help="Add a debugging mode for cell execution"),
+            "opf"
             )
     description = \
         """


diff -r 319fcd5d44b73635de8880ca941ed21a0aa76711 -r eef0ecf484eb4669591a6e3ff5b3950e19981767 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -288,7 +288,7 @@
         if size is None:
             size = len(self.available_ranks)
         if len(self.available_ranks) < size:
-            print 'Not enough resources available'
+            print 'Not enough resources available', size, self.available_ranks
             raise RuntimeError
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
@@ -315,6 +315,26 @@
         for wg in self.workgroups:
             self.free_workgroup(wg)
 
+    @classmethod
+    def from_sizes(cls, sizes):
+        sizes = ensure_list(sizes)
+        pool = cls()
+        rank = pool.comm.rank
+        for i,size in enumerate(sizes):
+            if iterable(size):
+                size, name = size
+            else:
+                name = "workgroup_%02i" % i
+            pool.add_workgroup(size, name = name)
+        for wg in pool.workgroups:
+            if rank in wg.ranks: workgroup = wg
+        return pool, workgroup
+
+    def __getitem__(self, key):
+        for wg in self.workgroups:
+            if wg.name == key: return wg
+        raise KeyError(key)
+
 class ResultsStorage(object):
     slots = ['result', 'result_id']
     result = None
@@ -517,24 +537,24 @@
         raise NotImplementedError
 
     @parallel_passthrough
-    def mpi_bcast(self, data):
+    def mpi_bcast(self, data, root = 0):
         # The second check below makes sure that we know how to communicate
         # this type of array. Otherwise, we'll pickle it.
         if isinstance(data, na.ndarray) and \
                 get_mpi_type(data.dtype) is not None:
-            if self.comm.rank == 0:
+            if self.comm.rank == root:
                 info = (data.shape, data.dtype)
             else:
                 info = ()
-            info = self.comm.bcast(info, root=0)
-            if self.comm.rank != 0:
+            info = self.comm.bcast(info, root=root)
+            if self.comm.rank != root:
                 data = na.empty(info[0], dtype=info[1])
             mpi_type = get_mpi_type(info[1])
-            self.comm.Bcast([data, mpi_type], root = 0)
+            self.comm.Bcast([data, mpi_type], root = root)
             return data
         else:
             # Use pickled methods.
-            data = self.comm.bcast(data, root = 0)
+            data = self.comm.bcast(data, root = root)
             return data
 
     def preload(self, grids, fields, io_handler):
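
The parallel_analysis_interface.py additions give the workgroup pool a
from_sizes() constructor, name lookup via __getitem__, and let mpi_bcast
broadcast from an arbitrary root rank.  A hedged caller-side sketch, assuming
the pool class here is yt's ProcessorPool and the signatures shown in the
diff; the sizes and names are made up and this is not a tested MPI run:

    from yt.utilities.parallel_tools.parallel_analysis_interface import \
        ProcessorPool

    # Split the available ranks into two named workgroups: 4 ranks for
    # rendering and 12 for analysis; each rank gets back the pool plus the
    # workgroup it landed in.
    pool, my_workgroup = ProcessorPool.from_sizes([(4, "render"),
                                                   (12, "analysis")])

    # __getitem__ lets other code look a workgroup up by name.
    render_wg = pool["render"]

    # mpi_bcast now accepts a root keyword, so data can be broadcast from a
    # rank other than 0 (e.g. the first rank of the render workgroup):
    #   data = comm.mpi_bcast(data, root=render_wg.ranks[0])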

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


