[Yt-svn] commit/yt: 4 new changesets

Bitbucket commits-noreply at bitbucket.org
Thu Feb 24 18:27:36 PST 2011


4 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/20a49bc35fbd/
changeset:   r3766:20a49bc35fbd
branch:      yt
user:        samskillman
date:        2011-02-25 03:18:17
summary:     Adding an initial go at streamlines.  It uses the AMR kD-tree to
traverse the bricks, and the streamline integrator uses a fourth-order
Runge-Kutta scheme.  It could still use some optimizing and improving,
but it works.  Because streamlines don't stay within a single domain,
this also required adding a mechanism to merge the kD-trees.
affected #:  4 files (7.8 KB)
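
For context, the integrate_streamline() routine added below advances each
streamline with the classic fourth-order Runge-Kutta update.  A minimal
NumPy sketch of the same scheme (the velocity() sampler here is a
hypothetical stand-in for the brick's vector-field lookup):

    import numpy as np

    def rk4_step(velocity, pos, h):
        # Classic RK4: sample the field at four staged positions and
        # combine them with weights 1/6, 1/3, 1/3, 1/6.
        k1 = velocity(pos)
        k2 = velocity(pos + 0.5 * h * k1)
        k3 = velocity(pos + 0.5 * h * k2)
        k4 = velocity(pos + h * k3)
        return pos + h * (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0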

--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx	Tue Feb 22 21:01:15 2011 -0500
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx	Thu Feb 24 19:18:17 2011 -0700
@@ -705,6 +705,75 @@
             gaussian = self.star_coeff * exp(-gexp/self.star_sigma_num)
             for i in range(3): rgba[i] += gaussian*dt*colors[i]
         kdtree_utils.kd_res_rewind(ballq)
+        
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def integrate_streamline(self, pos, np.float64_t h):
+        cdef np.float64_t k1[3], k2[3], k3[3], k4[3]
+        cdef np.float64_t newpos[3], oldpos[3]
+        for i in range(3):
+            newpos[i] = oldpos[i] = pos[i]
+        self.get_vector_field(newpos, k1)
+        for i in range(3):
+            newpos[i] = oldpos[i] + 0.5*k1[i]*h
+
+        if not (self.left_edge[0] <= newpos[0] and newpos[0] <= self.right_edge[0] and \
+                self.left_edge[1] <= newpos[1] and newpos[1] <= self.right_edge[1] and \
+                self.left_edge[2] <= newpos[2] and newpos[2] <= self.right_edge[2]):
+            for i in range(3):
+                pos[i] = newpos[i]
+            return
+        
+        self.get_vector_field(newpos, k2)
+        for i in range(3):
+            newpos[i] = oldpos[i] + 0.5*k2[i]*h
+
+        if not (self.left_edge[0] <= newpos[0] and newpos[0] <= self.right_edge[0] and \
+                self.left_edge[1] <= newpos[1] and newpos[1] <= self.right_edge[1] and \
+                self.left_edge[2] <= newpos[2] and newpos[2] <= self.right_edge[2]):
+            for i in range(3):
+                pos[i] = newpos[i]
+            return
+
+        self.get_vector_field(newpos, k3)
+        for i in range(3):
+            newpos[i] = oldpos[i] + k3[i]*h
+            
+        if not (self.left_edge[0] <= newpos[0] and newpos[0] <= self.right_edge[0] and \
+                self.left_edge[1] <= newpos[1] and newpos[1] <= self.right_edge[1] and \
+                self.left_edge[2] <= newpos[2] and newpos[2] <= self.right_edge[2]):
+            for i in range(3):
+                pos[i] = newpos[i]
+            return
+
+        self.get_vector_field(newpos, k4)
+
+        for i in range(3):
+            pos[i] = oldpos[i] + h*(k1[i]/6.0 + k2[i]/3.0 + k3[i]/3.0 + k4[i]/6.0)
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void get_vector_field(self, np.float64_t pos[3],
+                               np.float64_t *vel):
+        cdef np.float64_t dp[3]
+        cdef int ci[3] 
+        cdef np.float64_t vel_mag = 0.0
+        for i in range(3):
+            ci[i] = <int> ((pos[i]-self.left_edge[i])/self.dds[i])
+            dp[i] = (pos[i] - self.left_edge[i])%(self.dds[i])
+
+        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                          + ci[1] * (self.dims[2] + 1) + ci[2]
+        
+        for i in range(3):
+            vel[i] = offset_interpolate(self.dims, dp, self.data[i] + offset)
+            vel_mag += vel[i]*vel[i]
+        vel_mag = np.sqrt(vel_mag)
+        if vel_mag != 0.0:
+            for i in range(3):
+                vel[i] /= vel_mag
 
 cdef class GridFace:
     cdef int direction


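A note on the lookup in get_vector_field() above: a brick of dims cells
stores vertex-centered data with (dims + 1) samples per axis, flattened
in row-major order, so the base vertex of the cell containing pos is
found with the usual strided offset.  In plain Python (names here are
illustrative, not yt API):

    def vertex_offset(ci, dims):
        # Row-major offset of cell (ci[0], ci[1], ci[2]) into data of
        # shape (dims[0]+1, dims[1]+1, dims[2]+1), flattened to 1-D.
        return (ci[0] * (dims[1] + 1) * (dims[2] + 1)
                + ci[1] * (dims[2] + 1)
                + ci[2])
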
--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Feb 22 21:01:15 2011 -0500
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Thu Feb 24 19:18:17 2011 -0700
@@ -126,7 +126,7 @@
 class AMRKDTree(HomogenizedVolume):
     def __init__(self, pf,  l_max=None, le=None, re=None,
                  fields=None, no_ghost=False,
-                 tree_type='domain',log_fields=None):
+                 tree_type='domain',log_fields=None, merge_trees=False):
         r"""
         AMR kd-Tree object, a homogenized volume.
 
@@ -198,7 +198,14 @@
             prone to longer data IO times.  If all the data can fit in
             memory on each cpu, this can be the fastest option for
             multiple ray casts on the same dataset.
-
+        merge_trees: bool, optional
+            If True, the distributed kD-tree is merged together in an
+            allgather-like fashion.  This should not be used for
+            parallel rendering, as it will cause all processors to
+            render all bricks.  It is primarily useful for applications
+            that are not domain decomposed but still want to build the
+            kD-tree in parallel.  Default: False.
+        
 
         Returns
         -------
@@ -306,19 +313,30 @@
         t2 = time()
         mylog.debug('It took %e seconds to build AMRKDTree.tree' % (t2-t1))
         
-        # Build Tree Dictionary
         self._build_tree_dict()
 
+        # If the full amr kD-tree is requested, merge the results from
+        # the parallel build.
+        if merge_trees and nprocs > 1:
+            self.join_parallel_trees()            
+            self.my_l_corner = self.domain_left_edge
+            self.my_r_corner = self.domain_right_edge
+        
         # Initialize the kd leafs:
         self.initialize_leafs()
         
         # Add properties to leafs/nodes
         self.total_cost = self.count_cost()
+
         # Calculate the total volume spanned by the tree
         self.volume = self.count_volume()
         mylog.debug('Cost is %d' % self.total_cost)
         mylog.debug('Volume is %e' % self.volume) 
 
+        self.current_saved_grids = []
+        self.current_vcds = []
+
+
     def _build_tree_dict(self):
         self.tree_dict = {}
         for node in self.depth_traverse():
@@ -511,6 +529,32 @@
         self.brick_dimensions = na.array(self.brick_dimensions)
         del current_saved_grids, current_vcds
         self.bricks_loaded = True
+
+    def get_brick_data(self,current_node):
+        if current_node.brick is not None:
+            return 
+
+        if current_node.grid in self.current_saved_grids:
+            dds = self.current_vcds[self.current_saved_grids.index(current_node.grid)]
+        else:
+            dds = []
+            for i,field in enumerate(self.fields):
+                vcd = current_node.grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
+                if self.log_fields[i]: vcd = na.log10(vcd)
+                dds.append(vcd)
+            # Cache the grid's vertex-centered data once, not once per field.
+            self.current_saved_grids.append(current_node.grid)
+            self.current_vcds.append(dds)
+                
+        data = [d[current_node.li[0]:current_node.ri[0]+1,
+                  current_node.li[1]:current_node.ri[1]+1,
+                  current_node.li[2]:current_node.ri[2]+1].copy() for d in dds]
+
+        current_node.brick = PartitionedGrid(current_node.grid.id, len(self.fields), data,
+                                             current_node.l_corner.copy(), 
+                                             current_node.r_corner.copy(), 
+                                             current_node.dims.astype('int64'))
+
+        return
         
     def set_leaf_props(self,thisnode):
         r"""Given a leaf, gathers grid, indices, dimensions, and cost properties.
@@ -541,7 +585,49 @@
         for node in self.depth_traverse():
             if node.grid is not None:
                 self.set_leaf_props(node)
+
+    def join_parallel_trees(self):
+        self.trim_references()
+        self.merge_trees()
+        self.rebuild_references()
                 
+    def trim_references(self):
+        par_tree_depth = long(na.log2(nprocs))
+        for i in range(2*nprocs):
+            if ((i + 1) >> par_tree_depth) == 1:
+                # There are nprocs nodes that meet this criterion.
+                if (i + 1 - nprocs) != my_rank:
+                    self.tree_dict.pop(i)
+                    continue
+        for node in self.tree_dict.itervalues():
+            del node.parent, node.left_child, node.right_child
+            try:
+                del node.grids
+            except:
+                pass
+            if not na.isreal(node.grid):
+                node.grid = node.grid.id
+        if self.tree_dict[0].split_pos is None:
+            self.tree_dict.pop(0)
+
+    def merge_trees(self):
+        self.tree_dict = self._mpi_joindict(self.tree_dict)
+
+    def rebuild_references(self):
+        self.tree = self.tree_dict[0]
+        self.tree.parent = None
+        for node in self.depth_traverse():
+            try:
+                node.parent = self.tree_dict[_parent_id(node.id)]
+            except:
+                node.parent = None
+            try:
+                node.left_child = self.tree_dict[_lchild_id(node.id)]
+            except:
+                node.left_child = None
+            try:
+                node.right_child = self.tree_dict[_rchild_id(node.id)]
+            except:
+                node.right_child = None
 
     def count_cost(self):
         r"""Counts the cost of the entire tree, while filling in branch costs.
@@ -1075,3 +1161,20 @@
         f.create_dataset("/split_pos",data=kd_split_pos)
         f.create_dataset("/kd_owners",data=kd_owners)
         f.close()
+        
+    def corners_to_line(self,lc, rc):
+        x = na.array([ lc[0], lc[0], lc[0], lc[0], lc[0],
+                       rc[0], rc[0], rc[0], rc[0], rc[0],
+                       rc[0], lc[0], lc[0], rc[0],
+                       rc[0], lc[0], lc[0] ])
+        
+        y = na.array([ lc[1], lc[1], rc[1], rc[1], lc[1],
+                       lc[1], lc[1], rc[1], rc[1], lc[1],
+                       lc[1], lc[1], rc[1], rc[1],
+                       rc[1], rc[1], lc[1] ])
+        
+        z = na.array([ lc[2], rc[2], rc[2], lc[2], lc[2],
+                       lc[2], rc[2], rc[2], lc[2], lc[2],
+                       rc[2], rc[2], rc[2], rc[2],
+                       lc[2], lc[2], lc[2] ])
+        return x,y,z


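rebuild_references() above reconnects parents and children purely from
node ids.  The _parent_id/_lchild_id/_rchild_id helpers are not shown in
this diff; assuming the usual binary-heap numbering (which matches the
((i + 1) >> par_tree_depth) test in trim_references()), they would look
like:

    def _parent_id(node_id):
        # Root is node 0; children of node i are 2*i + 1 and 2*i + 2.
        return (node_id - 1) >> 1

    def _lchild_id(node_id):
        return (node_id << 1) + 1

    def _rchild_id(node_id):
        return (node_id << 1) + 2
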
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py	Tue Feb 22 21:01:15 2011 -0500
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py	Thu Feb 24 19:18:17 2011 -0700
@@ -610,6 +610,26 @@
         return (data, final)
 
     @parallel_passthrough
+    def _mpi_cat2d_double_array(self, data):
+        self._barrier()
+        data, final_shape = data
+        if MPI.COMM_WORLD.rank == 0:
+            new_array = na.zeros(final_shape,dtype='float64')
+            new_array[0:data.shape[0],:,:] = data[:]
+            rows_filled = data.shape[0]
+            for i in range(1, MPI.COMM_WORLD.size):
+                new_rows = MPI.COMM_WORLD.recv(source=i, tag=0)
+                new_array[rows_filled:rows_filled+new_rows,:,:] = _recv_array(
+                    source=i, tag=0).reshape((new_rows, final_shape[1], final_shape[2]))
+                rows_filled += new_rows
+            data = new_array
+        else:
+            MPI.COMM_WORLD.send(data.shape[0], 0, 0)
+            _send_array(data.ravel(), dest=0, tag=0)
+        data = MPI.COMM_WORLD.bcast(data)
+        return data
+            
+    @parallel_passthrough
     def _mpi_catdict(self, data):
         field_keys = data.keys()
         field_keys.sort()


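The new _mpi_cat2d_double_array() gathers each processor's leading-axis
block onto rank 0, stacks the blocks in rank order, and broadcasts the
assembled array back out.  A self-contained mpi4py sketch of the same
pattern (using plain pickled send/recv instead of yt's internal
_send_array/_recv_array helpers):

    from mpi4py import MPI
    import numpy as np

    def cat_rows(comm, local, final_shape):
        # Rank 0 collects every rank's rows; everyone gets the result.
        if comm.rank == 0:
            full = np.zeros(final_shape, dtype='float64')
            full[:local.shape[0]] = local
            filled = local.shape[0]
            for src in range(1, comm.size):
                block = comm.recv(source=src, tag=0)
                full[filled:filled + block.shape[0]] = block
                filled += block.shape[0]
        else:
            comm.send(local, dest=0, tag=0)
            full = None
        return comm.bcast(full, root=0)
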
--- a/yt/visualization/api.py	Tue Feb 22 21:01:15 2011 -0500
+++ b/yt/visualization/api.py	Thu Feb 24 19:18:17 2011 -0700
@@ -55,3 +55,6 @@
 
 from easy_plots import \
     plot_type_registry
+
+from streamlines import \
+     StreamLines


http://bitbucket.org/yt_analysis/yt/changeset/2cb24145eba6/
changeset:   r3767:2cb24145eba6
branch:      yt
user:        samskillman
date:        2011-02-25 03:18:53
summary:     Merging
affected #:  0 files (0 bytes)

--- a/yt/data_objects/data_containers.py	Thu Feb 24 19:18:17 2011 -0700
+++ b/yt/data_objects/data_containers.py	Thu Feb 24 19:18:53 2011 -0700
@@ -1456,7 +1456,7 @@
         self._obtain_fields(fields, self._node_name)
         fields = [f for f in fields if f not in self.data]
         if len(fields) == 0: return
-        tree = self._get_tree(len(fields) + int(self._weight is not None))
+        tree = self._get_tree(len(fields))
         coord_data = []
         field_data = []
         dxs = []
@@ -1482,7 +1482,7 @@
         for level in range(0, self._max_level + 1):
             npos, nvals, nwvals = tree.get_all_from_level(level, False)
             coord_data.append(npos)
-            if self._weight is not None: nvals /= nwvals
+            if self._weight is not None: nvals /= nwvals[:,None]
             field_data.append(nvals)
             weight_data.append(nwvals)
             gs = self.source.select_grids(level)
@@ -1534,7 +1534,7 @@
             masked_data  = [field_data[field].copy().astype('float64') * weight_data
                                 for field in fields]
             del field_data
-            wdl = self.dls[-1]
+            wdl = dls[-1]
         full_proj = [self.func(field, axis=self.axis) * dl
                      for field, dl in zip(masked_data, dls)]
         weight_proj = self.func(weight_data, axis=self.axis) * wdl
@@ -1544,11 +1544,12 @@
         else:
             used_data = na.array([1.0], dtype='bool')
             used_points = slice(None)
-        xind, yind = [arr[used_points].ravel() for arr in na.indices(full_proj[0].shape)]
+        xind, yind = [arr[used_points].ravel()
+                      for arr in na.indices(full_proj[0].shape)]
         start_index = grid.get_global_startindex()
         xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
         ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
-        to_add = na.array([d[used_points].ravel() for d in full_proj])
+        to_add = na.array([d[used_points].ravel() for d in full_proj], order='F')
         tree.add_array_to_tree(grid.Level, xpoints, ypoints, 
                     to_add, weight_proj[used_points].ravel())
 


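The nvals /= nwvals[:,None] change above is a NumPy broadcasting fix:
nvals carries one column per projected field while nwvals is a flat
per-point weight array, so the weights need a trailing axis before they
can divide every field column.  A small illustration:

    import numpy as np

    nvals = np.arange(6.0).reshape(3, 2)  # (npoints, nfields)
    nwvals = np.array([1.0, 2.0, 4.0])    # (npoints,)
    # nvals /= nwvals raises a shape mismatch ((3, 2) vs (3,));
    # a trailing axis broadcasts each weight across the fields:
    nvals /= nwvals[:, None]
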
--- a/yt/utilities/_amr_utils/QuadTree.pyx	Thu Feb 24 19:18:17 2011 -0700
+++ b/yt/utilities/_amr_utils/QuadTree.pyx	Thu Feb 24 19:18:53 2011 -0700
@@ -98,7 +98,9 @@
 
 cdef class QuadTree:
     cdef int nvals
-    cdef np.int64_t po2[80]
+    # Hardcode to a maximum 80 levels of refinement.
+    # TODO: Update when we get to yottascale.
+    cdef np.int64_t po2[80] 
     cdef QuadTreeNode ***root_nodes
     cdef np.int64_t top_grid_dims[2]
 


http://bitbucket.org/yt_analysis/yt/changeset/42dbe8a92c19/
changeset:   r3768:42dbe8a92c19
branch:      yt
user:        samskillman
date:        2011-02-25 03:19:36
summary:     Merging.
affected #:  0 files (0 bytes)

--- a/yt/visualization/volume_rendering/camera.py	Thu Feb 24 19:18:53 2011 -0700
+++ b/yt/visualization/volume_rendering/camera.py	Thu Feb 24 19:19:36 2011 -0700
@@ -45,7 +45,7 @@
                  volume = None, fields = None,
                  log_fields = None,
                  sub_samples = 5, pf = None,
-                 use_kd=True, l_max=None, no_ghost=False,
+                 use_kd=True, l_max=None, no_ghost=True,
                  tree_type='domain',expand_factor=1.0,
                  le=None, re=None):
         r"""A viewpoint into a volume, for volume rendering.
@@ -203,6 +203,8 @@
         self.use_kd = use_kd
         self.l_max = l_max
         self.no_ghost = no_ghost
+        if self.no_ghost:
+            mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
         self.tree_type = tree_type
         if volume is None:
             if self.use_kd:


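With no_ghost now defaulting to True, callers who want the slower but
smoother ghost-zone-filled vertex data (fewer artifacts at grid
boundaries) must opt back in explicitly.  A sketch, with the positional
arguments (center, normal vector, width, resolution, transfer function)
as placeholders rather than a definitive signature:

    cam = Camera(c, L, width, resolution, tf, no_ghost=False, pf=pf)
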
http://bitbucket.org/yt_analysis/yt/changeset/fd91f581b0e0/
changeset:   r3769:fd91f581b0e0
branch:      yt
user:        samskillman
date:        2011-02-25 03:25:31
summary:     Oops, should actually add streamlines.py to the repo.  It has an
example script in its docstring.
affected #:  1 file (3.6 KB)
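
The integration loop in the new module walks one kD-tree brick at a
time: locate the brick holding the current head of the streamline,
integrate inside it until the line exits the brick (or the domain),
then re-locate.  Schematically, with step counting down from self.steps
and stream[-step] as the current head:

    while step > 1:
        node = volume.locate_brick(stream[-step, :])
        step = integrate_through_brick(node, stream, step)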

--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/yt/visualization/streamlines.py	Thu Feb 24 19:25:31 2011 -0700
@@ -0,0 +1,103 @@
+import numpy as na
+from yt.funcs import *
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface
+from yt.utilities.amr_kdtree.api import AMRKDTree
+from yt.config import ytcfg
+
+my_rank = ytcfg.getint("yt", "__parallel_rank")
+nprocs = ytcfg.getint("yt", "__parallel_size")
+
+class StreamLines(ParallelAnalysisInterface):
+    r"""A collection of streamlines that flow through the volume
+
+        Examples
+        --------
+
+        >>> c = na.array([0.5]*3)
+        >>> N = 100
+        >>> scale = 1.0
+        >>> pos_dx = na.random.random((N,3))*scale-scale/2.
+        >>> pos = c+pos_dx
+
+        >>> streamlines = StreamLines(pf,pos,'x-velocity', 'y-velocity', 'z-velocity', length=1.0) 
+        >>> streamlines.integrate_through_volume()
+
+        >>> import matplotlib.pylab as pl
+        >>> from mpl_toolkits.mplot3d import Axes3D
+        >>> fig=pl.figure() 
+        >>> ax = Axes3D(fig)
+        >>> for stream in streamlines.streamlines:
+        ...     stream = stream[na.all(stream != 0.0, axis=1)]
+        ...     ax.plot3D(stream[:,0], stream[:,1], stream[:,2], alpha=0.1)
+        >>> pl.savefig('streamlines.png')
+        """
+    def __init__(self, pf, positions, xfield, yfield, zfield, volume=None,
+                 dx=None, length=None):
+        self.pf = pf
+        self.start_positions = positions
+        self.N = self.start_positions.shape[0]
+        self.xfield = xfield
+        self.yfield = yfield
+        self.zfield = zfield
+        if volume is None:
+            volume = AMRKDTree(self.pf, fields=[self.xfield,self.yfield,self.zfield],
+                            log_fields=[False,False,False], merge_trees=True)
+        self.volume = volume
+        if dx is None:
+            dx = self.pf.h.get_smallest_dx()
+        self.dx = dx
+        if length is None:
+            length = na.max(self.pf.domain_right_edge-self.pf.domain_left_edge)
+        self.length = length
+        self.steps = int(length/dx)
+        self.streamlines = na.zeros((self.N,self.steps,3), dtype='float64')
+
+    def integrate_through_volume(self):
+        my_count = 0
+        # Initialize streamlines
+        for i in range(self.N):
+            if i%nprocs != my_rank:
+                continue
+            self.streamlines[my_count,0,:] = self.start_positions[i]
+            my_count+=1
+        self.streamlines = self.streamlines[:my_count,:,:]
+        
+        pbar = get_pbar("Streamlining", self.N)
+        for i,stream in enumerate(self.streamlines):
+            step = self.steps
+            while (step > 1):
+                this_brick = self.volume.locate_brick(stream[-step,:])
+                step = self._integrate_through_brick(this_brick, stream, step)
+            pbar.update(i)
+        pbar.finish()
+
+        self._finalize_parallel()
+            
+    def _finalize_parallel(self):
+        if nprocs <= 1: return
+        self.streamlines = self._mpi_cat2d_double_array([
+            self.streamlines, [self.N,self.steps,3]])
+
+    def _integrate_through_brick(self, node, stream, step, periodic=False):
+        my_dx = node.grid.dds[0]
+        while (step > 1):
+            self.volume.get_brick_data(node)
+            brick = node.brick
+            stream[-step+1] = stream[-step]
+            brick.integrate_streamline(stream[-step+1], my_dx)
+            # Stop entirely if the new position has left the domain...
+            if na.any(stream[-step+1,:] <= self.pf.domain_left_edge) or \
+                   na.any(stream[-step+1,:] >= self.pf.domain_right_edge):
+                return 0
+
+            # ...or return the remaining step count if it has only left
+            # this brick, so the caller can locate the next one.
+            if na.any(stream[-step+1,:] < node.l_corner) or \
+                   na.any(stream[-step+1,:] >= node.r_corner):
+                return step-1
+            step -= 1
+        return step
+
+    
+
+
+
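
A final note on the parallel strategy: integrate_through_volume() deals
the seed points out round-robin, each rank integrates only its own, and
_finalize_parallel() reassembles the full (N, steps, 3) array via
_mpi_cat2d_double_array.  The ownership rule reduces to:

    # Rank my_rank of nprocs owns every nprocs-th seed point.
    my_seeds = [i for i in range(N) if i % nprocs == my_rank]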

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled for this
address.



More information about the yt-svn mailing list