[yt-svn] commit/yt: 2 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Feb 13 12:54:43 PST 2013


2 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/740ea6ff9ec4/
changeset:   740ea6ff9ec4
branch:      yt
user:        ngoldbaum
date:        2013-02-13 21:51:21
summary:     Fixing cutting plane to_pw(), adding cutting plane tests.
affected #:  2 files

diff -r d6fe5086342edfa314a842ec8291055c672a01b6 -r 740ea6ff9ec4464097aa369f87b90677cd526df3 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1387,9 +1387,11 @@
         else:
             self.fields = ensure_list(fields)
         from yt.visualization.plot_window import \
-            GetOffAxisBoundsAndCenter, PWViewerMPL
+            GetObliqueWindowParameters, PWViewerMPL
         from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
-        (bounds, center_rot) = GetOffAxisBoundsAndCenter(normal, center, width, self.pf)
+        (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+        if axes_unit is None and units != ('1', '1'):
+            axes_unit = units
         pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
                          frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
         pw.set_axes_unit(axes_unit)
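
For reference, a minimal usage sketch of the fixed to_pw() code path,
mirroring the new test below (fake_random_pf is provided by yt.testing):

    from yt.testing import fake_random_pf

    # Build a small synthetic dataset and an off-axis (cutting plane) slice.
    pf = fake_random_pf(64)
    cut = pf.h.cutting([1, 1, 1], [0.5, 0.5, 0.5], ["Density"])

    # to_pw() now goes through GetObliqueWindowParameters and returns a
    # PWViewerMPL plot window that can be saved directly.
    pw = cut.to_pw()
    pw.save()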

diff -r d6fe5086342edfa314a842ec8291055c672a01b6 -r 740ea6ff9ec4464097aa369f87b90677cd526df3 yt/data_objects/tests/test_cutting_plane.py
--- /dev/null
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -0,0 +1,45 @@
+from yt.testing import *
+import os
+
+def setup():
+    from yt.config import ytcfg
+    ytcfg["yt","__withintesting"] = "True"
+
+def teardown_func(fns):
+    for fn in fns:
+        os.remove(fn)
+
+def test_cutting_plane():
+    for nprocs in [8, 1]:
+        # We want to test both 1 proc and 8 procs, to make sure that
+        # parallelism isn't broken
+        pf = fake_random_pf(64, nprocs = nprocs)
+        dims = pf.domain_dimensions
+        center = [0.5,0.5,0.5]
+        normal = [1,1,1]
+        fns = []
+        cut = pf.h.cutting(normal, center, ["Ones", "Density"])
+        yield assert_equal, cut["Ones"].sum(), cut["Ones"].size
+        yield assert_equal, cut["Ones"].min(), 1.0
+        yield assert_equal, cut["Ones"].max(), 1.0
+        pw = cut.to_pw()
+        fns += pw.save()
+        frb = cut.to_frb((1.0,'unitary'), 64)
+        for cut_field in ['Ones', 'Density']:
+            yield assert_equal, frb[cut_field].info['data_source'], \
+                cut.__str__()
+            yield assert_equal, frb[cut_field].info['axis'], \
+                4
+            yield assert_equal, frb[cut_field].info['field'], \
+                cut_field
+            yield assert_equal, frb[cut_field].info['units'], \
+                pf.field_info[cut_field].get_units()
+            yield assert_equal, frb[cut_field].info['xlim'], \
+                frb.bounds[:2]
+            yield assert_equal, frb[cut_field].info['ylim'], \
+                frb.bounds[2:]
+            yield assert_equal, frb[cut_field].info['length_to_cm'], \
+                pf['cm']
+            yield assert_equal, frb[cut_field].info['center'], \
+                cut.center
+        teardown_func(fns)
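
The test above uses nose's generator-test style: nose calls the first
element of each yielded tuple with the remaining elements as arguments,
reporting every yield as a separate test case. A standalone sketch of the
pattern (test_generator_style is a hypothetical example, not part of the
commit):

    from numpy.testing import assert_equal

    def test_generator_style():
        # nose runs each yielded (callable, args...) tuple as its own test.
        for n in [1, 8]:
            yield assert_equal, sum(range(n)), n * (n - 1) // 2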


https://bitbucket.org/yt_analysis/yt/commits/a9ad0cb73590/
changeset:   a9ad0cb73590
branch:      yt
user:        ngoldbaum
date:        2013-02-13 21:51:46
summary:     Merged yt_analysis/yt into yt
affected #:  27 files

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,6 +5152,7 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
-0000000000000000000000000000000000000000 hop callback

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -7,8 +7,8 @@
 # There are a few options, but you only need to set *one* of them.  And
 # that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
 # installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up).
+# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
+# check the current directory and one up).
 #
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
@@ -49,7 +49,7 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
 YT_DIR=""
 
 # If you need to pass anything to matplotlib, do so here.
@@ -261,7 +261,7 @@
         echo " to avoid conflicts with other command-line programs "
         echo " (like eog and evince, for example)."
     fi
-    if [$INST_SCIPY -eq 1]
+    if [ $INST_SCIPY -eq 1 ]
     then
 	echo
 	echo "Looks like you've requested that the install script build SciPy."
@@ -293,9 +293,9 @@
 echo
 echo "========================================================================"
 echo
-echo "Hi there!  This is the YT installation script.  We're going to download"
+echo "Hi there!  This is the yt installation script.  We're going to download"
 echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
 echo
 echo "Inside the installation script you can set a few variables.  Here's what"
 echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
@@ -334,7 +334,7 @@
 echo "be installing PyX"
 
 printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
-get_willwont ${INST_PYX}
+get_willwont ${INST_SCIPY}
 echo "be installing scipy"
 
 printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
@@ -476,7 +476,7 @@
 echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
 echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
-echo '52d1127de2208aaae693d16fef10ffc9b8663081bece83b7597d65706e9568af3b9e56bd211878774e1ebed92e21365ee9c49602a0ff5e48f89f12244d79c161  mercurial-2.4.tar.gz' > mercurial-2.4.tar.gz.sha512
+echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
@@ -509,7 +509,7 @@
 get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
 get_ytproject matplotlib-1.2.0.tar.gz
-get_ytproject mercurial-2.4.tar.gz
+get_ytproject mercurial-2.5.1.tar.gz
 get_ytproject ipython-0.13.1.tar.gz
 get_ytproject h5py-2.1.0.tar.gz
 get_ytproject Cython-0.17.1.tar.gz
@@ -636,7 +636,7 @@
 
 if [ ! -e Python-2.7.3/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  YT loves you."
+    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
     [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
     cd Python-2.7.3
     ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -654,7 +654,7 @@
 if [ $INST_HG -eq 1 ]
 then
     echo "Installing Mercurial."
-    do_setup_py mercurial-2.4
+    do_setup_py mercurial-2.5.1
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -725,7 +725,7 @@
 	    echo "Building LAPACK"
 	    cd lapack-3.4.2/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -142,18 +142,30 @@
         if self.CoM is not None:
             return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        if isinstance(self, FOFHalo):
-            c_vec = np.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
-        else:
-            c_vec = self.maximum_density_location() - self.pf.domain_center
-        cx = (cx - c_vec[0])
-        cy = (cy - c_vec[1])
-        cz = (cz - c_vec[2])
-        com = np.array([v - np.floor(v) for v in [cx, cy, cz]])
-        return (com * pm).sum(axis=1) / pm.sum() + c_vec
+        c = {}
+        c[0] = self["particle_position_x"]
+        c[1] = self["particle_position_y"]
+        c[2] = self["particle_position_z"]
+        c_vec = np.zeros(3)
+        com = []
+        for i in range(3):
+            # A halo likely wraps around a periodic boundary if the
+            # distance between the max and min particle positions is
+            # larger than half the box, so skip the rest if the
+            # converse is true.
+            # Note we might make a change here when periodicity-handling is
+            # fully implemented.
+            if (c[i].max() - c[i].min()) < (self.pf.domain_width[i] / 2.):
+                com.append(c[i])
+                continue
+            # Now we want to flip around only those close to the left boundary.
+            d_left = c[i] - self.pf.domain_left_edge[i]
+            sel = (d_left <= (self.pf.domain_width[i]/2))
+            c[i][sel] += self.pf.domain_width[i]
+            com.append(c[i])
+        com = np.array(com)
+        c = (com * pm).sum(axis=1) / pm.sum()
+        return c%self.pf.domain_width
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to
@@ -809,7 +821,6 @@
     _radjust = 1.05
 
     def __init__(self, pf, id, size=None, CoM=None,
-
         max_dens_point=None, group_total_mass=None, max_radius=None, bulk_vel=None,
         rms_vel=None, fnames=None, mag_A=None, mag_B=None, mag_C=None,
         e1_vec=None, tilt=None, supp=None):
@@ -843,6 +854,10 @@
             self.supp = {}
         else:
             self.supp = supp
+        self._saved_fields = {}
+        self._ds_sort = None
+        self._particle_mask = None
+
 
     def __getitem__(self, key):
         # This function will try to get particle data in one of three ways,
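
The rewritten center_of_mass above unwraps particles that straddle a
periodic boundary before averaging. A self-contained numpy sketch of the
same technique (periodic_com is a hypothetical helper, and the domain is
assumed to have its left edge at zero):

    import numpy as np

    def periodic_com(pos, mass, box_width):
        # pos: (N, 3) positions, mass: (N,) weights, box_width: (3,)
        pos = pos.copy()
        for i in range(3):
            # Only unwrap an axis when the particle spread exceeds half
            # the box -- the same heuristic the halo code uses.
            if pos[:, i].max() - pos[:, i].min() < box_width[i] / 2.0:
                continue
            # Shift particles in the left half up by one box width.
            sel = pos[:, i] <= box_width[i] / 2.0
            pos[sel, i] += box_width[i]
        com = (pos * mass[:, None]).sum(axis=0) / mass.sum()
        # Fold the result back into the periodic domain.
        return com % box_width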

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -132,7 +132,6 @@
         not stored in enzo datasets, so must be entered by hand.
         sigma8input=%f primordial_index=%f omega_baryon0=%f
         """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        time.sleep(1)
         
         # Do the calculations.
         self.sigmaM()

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -7,7 +7,7 @@
 
 _fields = ("Density", "x-velocity", "y-velocity", "z-velocity")
 
-def test_covering_grid():
+def test_streamlines():
     # We decompose in different ways
     cs = np.mgrid[0.47:0.53:2j,0.47:0.53:2j,0.47:0.53:2j]
     cs = np.array([a.ravel() for a in cs]).T

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -451,7 +451,7 @@
         self.current_time = self.parameters["time"]
 
         # Determine if this is a periodic box
-        p = [self.parameters.get("%sl_boundary_type" % ax, None) == Periodic for ax in 'xyz']
+        p = [self.parameters.get("%sl_boundary_type" % ax, None) == "periodic" for ax in 'xyz']
         self.periodicity = tuple(p)
 
         # Determine cosmological parameters.
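
The fix above compares the boundary type against the string "periodic"
rather than the bare name Periodic, which did not match the string values
actually stored in the parameter file. A minimal illustration with a
hypothetical FLASH-style parameters dict:

    parameters = {"xl_boundary_type": "periodic",
                  "yl_boundary_type": "periodic",
                  "zl_boundary_type": "outflow"}

    periodicity = tuple(
        parameters.get("%sl_boundary_type" % ax, None) == "periodic"
        for ax in "xyz")
    print(periodicity)  # (True, True, False)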

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -22,6 +22,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import itertools as it
 import numpy as np
 from yt.funcs import *
 from numpy.testing import assert_array_equal, assert_almost_equal, \
@@ -163,3 +164,100 @@
                  for field,offset in zip(fields,offsets))
     ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
     return ug
+
+def expand_keywords(keywords, full=False):
+    """
+    expand_keywords is a means for testing all possible keyword
+    arguments in the nosetests.  Simply pass it a dictionary of all the
+    keyword arguments and all of the values for these arguments that you
+    want to test.
+
+    It will return a list of **kwargs dicts containing combinations of
+    the various kwarg values you passed it.  These can then be passed
+    to the appropriate function in nosetests. 
+
+    If full=True, then every possible combination of keywords is produced,
+    otherwise, every keyword option is included at least once in the output
+    list.  Be careful, by using full=True, you may be in for an exponentially
+    larger number of tests! 
+
+    keywords : dict
+        a dictionary where the keys are the keywords for the function,
+        and the values of each key are the possible values that this key
+        can take in the function
+
+    full : bool
+        if set to True, every possible combination of given keywords is 
+        returned
+
+    Returns
+    -------
+    array of dicts
+        An array of **kwargs dictionaries to be individually passed to
+        the appropriate function matching these kwargs.
+
+    Examples
+    --------
+    >>> keywords = {}
+    >>> keywords['dpi'] = (50, 100, 200)
+    >>> keywords['cmap'] = ('algae', 'jet')
+    >>> list_of_kwargs = expand_keywords(keywords)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'jet', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}], dtype=object)
+
+    >>> list_of_kwargs = expand_keywords(keywords, full=True)
+    >>> print list_of_kwargs
+
+    array([{'cmap': 'algae', 'dpi': 50}, 
+           {'cmap': 'algae', 'dpi': 100},
+           {'cmap': 'algae', 'dpi': 200}, 
+           {'cmap': 'jet', 'dpi': 50},
+           {'cmap': 'jet', 'dpi': 100}, 
+           {'cmap': 'jet', 'dpi': 200}], dtype=object)
+
+    >>> for kwargs in list_of_kwargs:
+    ...     write_projection(*args, **kwargs)
+    """
+
+    # if we want every possible combination of keywords, use iter magic
+    if full:
+        keys = sorted(keywords)
+        list_of_kwarg_dicts = np.array([dict(zip(keys, prod)) for prod in \
+                              it.product(*(keywords[key] for key in keys))])
+            
+    # if we just want to probe each keyword, but not necessarily every 
+    # combination
+    else:
+        # Determine the maximum number of values any of the keywords has
+        num_lists = 0
+        for val in keywords.values():
+            if isinstance(val, str):
+                num_lists = max(1, num_lists)
+            else:
+                num_lists = max(len(val), num_lists)
+    
+        # Construct an array of kwargs dicts; each element of the list is a
+        # different **kwargs dict, and each gives a different combination of
+        # the possible values of the kwargs.
+    
+        # initialize array
+        list_of_kwarg_dicts = np.array([dict() for x in range(num_lists)])
+    
+        # fill in array
+        for i in np.arange(num_lists):
+            list_of_kwarg_dicts[i] = {}
+            for key in keywords.keys():
+                # if it's a string, use it (there's only one)
+                if isinstance(keywords[key], str):
+                    list_of_kwarg_dicts[i][key] = keywords[key]
+                # if there are more options, use the i'th val
+                elif i < len(keywords[key]):
+                    list_of_kwarg_dicts[i][key] = keywords[key][i]
+                # if there are not more options, use the 0'th val
+                else:
+                    list_of_kwarg_dicts[i][key] = keywords[key][0]
+
+    return list_of_kwarg_dicts

diff -r 740ea6ff9ec4464097aa369f87b90677cd526df3 -r a9ad0cb73590875e67ca6538692fb7c7fccbf9dc yt/utilities/amr_kdtree/amr_kdtools.py
--- /dev/null
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -0,0 +1,402 @@
+"""
+AMR kD-Tree Tools 
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+import numpy as np
+from yt.funcs import *
+from yt.utilities.lib import kdtree_get_choices
+
+def _lchild_id(node_id): return (node_id<<1)
+def _rchild_id(node_id): return (node_id<<1) + 1
+def _parent_id(node_id): return (node_id-1) >> 1
+
+class Node(object):
+    def __init__(self, parent, left, right,
+            left_edge, right_edge, grid_id, node_id):
+        self.left = left
+        self.right = right
+        self.left_edge = left_edge
+        self.right_edge = right_edge
+        self.grid = grid_id
+        self.parent = parent
+        self.id = node_id
+        self.data = None
+        self.split = None
+
+class Split(object):
+    def __init__(self, dim, pos):
+        self.dim = dim
+        self.pos = pos
+
+def should_i_build(node, rank, size):
+    if (node.id < size) or (node.id >= 2*size):
+        return True
+    elif node.id - size == rank:
+        return True
+    else:
+        return False
+
+def add_grids(node, gles, gres, gids, rank, size):
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, gles, gres, gids, rank, size)
+    else:
+        less_ids = gles[:,node.split.dim] < node.split.pos
+        if len(less_ids) > 0:
+            add_grids(node.left, gles[less_ids], gres[less_ids],
+                      gids[less_ids], rank, size)
+
+        greater_ids = gres[:,node.split.dim] > node.split.pos
+        if len(greater_ids) > 0:
+            add_grids(node.right, gles[greater_ids], gres[greater_ids],
+                      gids[greater_ids], rank, size)
+
+def should_i_split(node, rank, size):
+    return node.id < size
+
+def geo_split(node, gles, gres, grid_ids, rank, size):
+    big_dim = np.argmax(gres[0]-gles[0])
+    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+    old_gre = gres[0].copy()
+    new_gle = gles[0].copy()
+    new_gle[big_dim] = new_pos
+    gres[0][big_dim] = new_pos
+    gles = np.append(gles, np.array([new_gle]), axis=0)
+    gres = np.append(gres, np.array([old_gre]), axis=0)
+    grid_ids = np.append(grid_ids, grid_ids, axis=0)
+
+    split = Split(big_dim, new_pos)
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[:1], gres[:1],
+            grid_ids[:1], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[1:], gres[1:],
+            grid_ids[1:], rank, size)
+    return
+
+def insert_grids(node, gles, gres, grid_ids, rank, size):
+    if not should_i_build(node, rank, size) or grid_ids.size == 0:
+        return
+
+    if len(grid_ids) == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles, gres, grid_ids, rank, size)
+            return
+
+        if np.all(gles[0] <= node.left_edge) and \
+                np.all(gres[0] >= node.right_edge):
+            node.grid = grid_ids[0]
+            assert(node.grid is not None)
+            return
+
+    # Split the grids
+    check = split_grids(node, gles, gres, grid_ids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = None
+    return
+
+def split_grids(node, gles, gres, grid_ids, rank, size):
+    # Find a Split
+    data = np.array([(gles[i,:], gres[i,:]) for i in
+        xrange(grid_ids.shape[0])], copy=False)
+    best_dim, split_pos, less_ids, greater_ids = \
+        kdtree_get_choices(data, node.left_edge, node.right_edge)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        return -1
+
+    split = Split(best_dim, split_pos)
+
+    del data, best_dim, split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, gles[less_ids], gres[less_ids],
+                 grid_ids[less_ids], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
+                 grid_ids[greater_ids], rank, size)
+
+    return
+
+def new_right(node, split):
+    new_right = node.right_edge.copy()
+    new_right[split.dim] = split.pos
+    return new_right
+
+def new_left(node, split):
+    new_left = node.left_edge.copy()
+    new_left[split.dim] = split.pos
+    return new_left
+
+def divide(node, split):
+    # Create a Split
+    node.split = split
+    node.left = Node(node, None, None,
+            node.left_edge, new_right(node, split), node.grid,
+                     _lchild_id(node.id))
+    node.right = Node(node, None, None,
+            new_left(node, split), node.right_edge, node.grid,
+                      _rchild_id(node.id))
+    return
+
+def kd_sum_volume(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_sum_cells(node):
+    if (node.left is None) and (node.right is None):
+        if node.grid is None:
+            return 0.0
+        return np.prod(node.right_edge - node.left_edge)
+    else:
+        return kd_sum_cells(node.left) + kd_sum_cells(node.right)
+
+
+def kd_node_check(node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid is not None:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(node):
+    no_l_child = node.left is None
+    no_r_child = node.right is None
+    assert no_l_child == no_r_child
+    return no_l_child
+
+def step_depth(current, previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+
+def depth_traverse(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(tree):
+    '''
+    Yields a breadth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(current, previous, viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+
+def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+    arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+        (image.shape[0], image.shape[1], image.shape[2]))
+
+    if add_to_front:
+        front = arr2
+        back = image
+    else:
+        front = image
+        back = arr2
+
+    if image.shape[2] == 3:
+        # Assume Projection Camera, Add
+        np.add(image, front, image)
+        return image
+
+    ta = 1.0 - front[:,:,3]
+    np.maximum(ta, 0.0, ta)
+    # This now does the following calculation, but in a memory
+    # conservative fashion
+    # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
+    image = back.copy()
+    for i in range(4):
+        np.multiply(image[:,:,i], ta, image[:,:,i])
+    np.add(image, front, image)
+    return image
+
+def send_to_parent(comm, outgoing_rank, image):
+    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    comm.send_array(image, outgoing_rank, tag=comm.rank)
+
+def scatter_image(comm, root, image):
+    mylog.debug( 'Scattering from %04i' % root)
+    image = comm.mpi_bcast(image, root=root)
+    return image
+
+def find_node(node, pos):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(np.all(node.left_edge <= pos))
+    assert(np.all(node.right_edge > pos))
+    while not kd_is_leaf(node):
+        if pos[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+

This diff is so big that we needed to truncate the remainder.
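
For reference, receive_and_reduce() in the amr_kdtools diff above performs
the front-to-back blend image[:,:,i] = front[:,:,i] + ta*back[:,:,i] in
place to conserve memory. An equivalent out-of-place sketch
(composite_over is hypothetical; RGBA images with alpha in channel 3):

    import numpy as np

    def composite_over(front, back):
        # Remaining transmission after passing through the front layer.
        ta = np.maximum(1.0 - front[:, :, 3], 0.0)
        out = back.copy()
        for i in range(4):
            out[:, :, i] *= ta  # attenuate everything behind the front
        return out + front      # then add the front layer's contribution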

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


