[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Tue Jul 2 06:59:00 PDT 2013


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/fcce833dd247/
Changeset:   fcce833dd247
Branch:      stable
User:        MatthewTurk
Date:        2013-07-02 15:57:23
Summary:     Merging from development branch for 2.5.4.
Affected #:  26 files

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
         self.layers.append(base_layer)
         self.cell_count += np.product(pf.domain_dimensions)
 
-        for grid in pf.h.grids:
+        sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+        for grid in sorted_grids:
             if grid.Level <= self.max_level:
                 self._add_grid_to_layers(grid)
 
@@ -232,11 +233,11 @@
             if p == 0:
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
-                LE = np.zeros(3)
+                parent_LE = np.zeros(3)
                 for potential_parent in self.layers:
                     if potential_parent.id == p:
-                        LE = potential_parent.LeftEdge
-                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
             ix  = int(ind[0]+0.5)
             iy  = int(ind[1]+0.5)
             iz  = int(ind[2]+0.5)

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3121,6 +3121,8 @@
     In-line extracted regions accept a base region and a set of field_cuts to
     determine which points in a grid should be included.
     """
+    _type_name = "cut_region"
+    _con_args = ("_base_region", "_field_cuts")
     def __init__(self, base_region, field_cuts, **kwargs):
         cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1082,7 +1082,7 @@
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 
-add_field("BRadial", function=_BPoloidal,
+add_field("BRadial", function=_BRadial,
           units=r"\rm{Gauss}",
           validators=[ValidateParameter("normal")])
 

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -37,7 +37,8 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh
+    mh, \
+    kpc_per_cm
 from yt.funcs import *
 
 import yt.utilities.lib as amr_utils
@@ -77,6 +78,7 @@
 
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
+
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
@@ -315,6 +317,10 @@
 
 def _convertDensity(data):
     return data.convert("Density")
+
+def _convertCmToKpc(data):
+    return 1/(kpc_per_cm)**3
+
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
         ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
@@ -365,8 +371,35 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("star_density", function=_spdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Stellar\/Density")
+
+def _tpdensity(field, data): 
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
+    if data["particle_position_x"].size == 0: return blank
+    filter = data['particle_type'] == 3 # tracer particles
+    if not filter.any(): return blank
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           np.ones(filter.sum(), dtype="float64"),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    blank /= data['CellVolume']
+    return blank
+
+add_field("tracer_number_density", function=_tpdensity,
+          validators=[ValidateSpatial(0)], convert_function=_convertCmToKpc,
+          units = r"\rm{particles}/\rm{kpc}^3",
+          projected_units = r"\rm{particles}/\rm{kpc}^2",
+          display_name = "Tracer\/Particle\/Number\/Density",
+          projection_conversion='kpc')
 
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
@@ -387,8 +420,12 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("dm_density", function=_dmpdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Dark\/Matter\/Density")
 
 def _cic_particle_field(field, data):
     """

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -458,7 +458,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = self.parameters['redshift']
+            self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -74,8 +74,9 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.data_software != "piernik":
+            if self.pf.dimensionality < 2: self.dds[1] = 1.0
+            if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):
@@ -214,6 +215,11 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
+        if 'data_software' in self._handle['gridded_data_format'].attrs:
+            self.data_software = \
+                self._handle['gridded_data_format'].attrs['data_software']
+        else:
+            self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -98,7 +98,17 @@
         if param == "loglevel": # special case
             mylog.setLevel(int(val))
 
-parser = argparse.ArgumentParser(description = 'yt command line arguments')
+class YTParser(argparse.ArgumentParser):
+    def error(self, message):
+        """error(message: string)
+
+        Prints a help message that is more detailed than the argparse default
+        and then exits.
+        """
+        self.print_help(sys.stderr)
+        self.exit(2, '%s: error: %s\n' % (self.prog, message))
+
+parser = YTParser(description = 'yt command line arguments')
 parser.add_argument("--config", action=SetConfigOption,
     help = "Set configuration option, in the form param=value")
 parser.add_argument("--paste", action=SetExceptionHandling,

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,13 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+from amr_kdtools import \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +70,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +93,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +102,35 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
 
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -134,19 +141,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):
@@ -204,14 +212,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -232,13 +234,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -248,33 +250,32 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
-                                  image, add_to_front)
-                if node.parent.id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
-                break
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +293,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -405,7 +406,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -426,8 +427,8 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
-                if node.grid is not None:
+                i = node.node_id
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
@@ -476,32 +477,28 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+        for node in depth_first_touch(self.tree.trunk):
+            nids.append(node.node_id) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -518,19 +515,23 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 80de2ef3fe6db17e6fe8e3264ad65b686c221900 -r fcce833dd247341eaa4a89d5e36a00ff26819f45 yt/utilities/lib/amr_kdtools.pyx
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -0,0 +1,921 @@
+"""
+AMR kD-Tree Cython Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+
+DEF Nch = 4
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef class Node:
+
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public np.int64_t node_id
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public data
+    cdef Split * split
+
+    def __cinit__(self, 
+                  Node parent, 
+                  Node left, 
+                  Node right, 
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  int grid,
+                  np.int64_t node_id):
+        self.left = left
+        self.right = right
+        self.parent = parent
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.grid = grid
+        self.node_id = node_id
+        self.split = NULL
+
+    def print_me(self):
+        print 'Node %i' % self.node_id
+        print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], 
+                                   self.left_edge[2])
+        print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], 
+                                   self.right_edge[2])
+        print '\t grid: %i' % self.grid
+
+    def get_split_dim(self):
+        if self.split != NULL:
+            return self.split.dim
+        else:
+            return -1
+    
+    def get_split_pos(self):
+        if self.split != NULL:
+            return self.split.pos
+        else:
+            return np.nan
+
+    def get_left_edge(self):
+        return get_left_edge(self)
+    
+    def get_right_edge(self):
+        return get_right_edge(self)
+
+    def set_left_edge(self, np.ndarray[np.float64_t, ndim=1] left_edge):
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+    
+    def set_right_edge(self, np.ndarray[np.float64_t, ndim=1] right_edge):
+        cdef int i
+        for i in range(3):
+            self.right_edge[i] = right_edge[i]
+
+    def create_split(self, dim, pos):
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = dim 
+        split.pos = pos
+        self.split = split
+
+    def __dealloc__(self):
+        if self.split != NULL: free(self.split)
+
+def get_left_edge(Node node):
+    le = np.empty(3, dtype='float64')
+    for i in range(3):
+        le[i] = node.left_edge[i]
+    return le
+
+def get_right_edge(Node node):
+    re = np.empty(3, dtype='float64')
+    for i in range(3):
+        re[i] = node.right_edge[i]
+    return re
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _lchild_id(np.int64_t node_id):
+    return (node_id<<1)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _rchild_id(np.int64_t node_id):
+    return (node_id<<1) + 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _parent_id(np.int64_t node_id):
+    return (node_id-1) >> 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_build(Node node, int rank, int size):
+    if (node.node_id < size) or (node.node_id >= 2*size):
+        return 1 
+    elif node.node_id - size == rank:
+        return 1 
+    else:
+        return 0 
+
+def kd_traverse(Node trunk, viewpoint=None):
+    if viewpoint is None:
+        for node in depth_traverse(trunk):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+    else:
+        for node in viewpoint_traverse(trunk, viewpoint):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grid(Node node, 
+                   np.float64_t *gle, 
+                   np.float64_t *gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grid(node, gle, gre, gid, rank, size)
+    else:
+        less_id = gle[node.split.dim] < node.split.pos
+        if less_id:
+            add_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+        greater_id = gre[node.split.dim] > node.split.pos
+        if greater_id:
+            add_grid(node.right, gle, gre,
+                     gid, rank, size)
+    return
+
+def add_pygrid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    cdef int j
+    for j in range(3):
+        pgles[j] = gle[j]
+        pgres[j] = gre[j]
+
+    add_grid(node, pgles, pgres, gid, rank, size)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef insert_grid(Node node, 
+                np.float64_t *gle, 
+                np.float64_t *gre, 
+                int grid_id, 
+                int rank,
+                int size):
+    if not should_i_build(node, rank, size):
+        return
+
+    # If we should continue to split based on parallelism, do so!
+    if should_i_split(node, rank, size):
+        geo_split(node, gle, gre, grid_id, rank, size)
+        return
+
+    cdef int contained = 1
+    for i in range(3):
+        if gle[i] > node.left_edge[i] or\
+           gre[i] < node.right_edge[i]:
+            contained *= 0
+
+    if contained == 1:
+        node.grid = grid_id 
+        assert(node.grid != -1)
+        return
+
+    # Split the grid
+    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1 
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_pygrids(Node node, 
+                    int ngrids,
+                    np.ndarray[np.float64_t, ndim=2] gles, 
+                    np.ndarray[np.float64_t, ndim=2] gres, 
+                    np.ndarray[np.int64_t, ndim=1] gids, 
+                    int rank,
+                    int size):
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    for i in range(ngrids):
+        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgids[i] = gids[i]
+        for j in range(3):
+            pgles[i][j] = gles[i, j]
+            pgres[i][j] = gres[i, j]
+
+    add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
+
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grids(Node node, 
+                    int ngrids,
+                    np.float64_t **gles, 
+                    np.float64_t **gres, 
+                    np.int64_t *gids, 
+                    int rank,
+                    int size):
+    cdef int i, j, nless, ngreater
+    cdef np.int64_t gid
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, ngrids, gles, gres, gids, rank, size)
+        return
+
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if gles[i][node.split.dim] < node.split.pos:
+            less_ids[nless] = i
+            nless += 1
+            
+        if gres[i][node.split.dim] > node.split.pos:
+            greater_ids[ngreater] = i
+            ngreater += 1
+
+    #print 'nless: %i' % nless
+    #print 'ngreater: %i' % ngreater
+
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_ids[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        add_grids(node.left, nless, less_gles, less_gres,
+                  l_ids, rank, size)
+            
+    for i in range(ngreater):
+        index = greater_ids[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        add_grids(node.right, ngreater, greater_gles, greater_gres,
+                  g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_ids)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_ids)
+    free(greater_gles)
+    free(greater_gres)
+
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_split(Node node, int rank, int size):
+    if node.node_id < size and node.node_id > 0:
+        return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void insert_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    
+    if not should_i_build(node, rank, size) or ngrids == 0:
+        return
+    cdef int contained = 1
+    cdef int check
+
+    if ngrids == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles[0], gres[0], gids[0], rank, size)
+            return
+
+        for i in range(3):
+            contained *= gles[0][i] <= node.left_edge[i]
+            contained *= gres[0][i] >= node.right_edge[i]
+    
+        if contained == 1:
+            # print 'Node fully contained, setting to grid: %i' % gids[0]
+            node.grid = gids[0]
+            assert(node.grid != -1)
+            return
+
+    # Split the grids
+    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef split_grid(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int gid, 
+               int rank,
+               int size):
+
+    cdef int j
+    data = <np.float64_t ***> alloca(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+    for j in range(2):
+        data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    for j in range(3):
+        data[0][0][j] = gle[j]
+        data[0][1][j] = gre[j]
+
+    less_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if nless == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if ngreater == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids, 
+                        np.float64_t ***data,
+                        np.float64_t *l_corner,
+                        np.float64_t *r_corner,
+                        np.uint8_t *less_ids,
+                        np.uint8_t *greater_ids,
+                       ):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i][j][dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i][j][dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef int nless=0, ngreater=0
+    for i in range(n_grids):
+        if data[i][0][best_dim] < split:
+            less_ids[i] = 1
+            nless += 1
+        else:
+            less_ids[i] = 0
+        if data[i][1][best_dim] > split:
+            greater_ids[i] = 1
+            ngreater += 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, nless, ngreater
+
+#@cython.boundscheck(False)
+#@cython.wraparound(False)
+#@cython.cdivision(True)
+cdef int split_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    # Find a Split
+    cdef int i, j, k
+
+    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
+    for i in range(ngrids):
+        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+        for j in range(2):
+            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        for j in range(3):
+            data[i][0][j] = gles[i][j]
+            data[i][1][j] = gres[i][j]
+
+    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+ 
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grids.'
+        return -1
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if less_ids[i] == 1:
+            less_index[nless] = i
+            nless += 1
+
+        if greater_ids[i] == 1:
+            greater_index[ngreater] = i
+            ngreater += 1
+
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_index[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        # Populate Left Node
+        #print 'Inserting left node', node.left_edge, node.right_edge
+        insert_grids(node.left, nless, less_gles, less_gres,
+                     l_ids, rank, size)
+
+    for i in range(ngreater):
+        index = greater_index[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        # Populate Right Node
+        #print 'Inserting right node', node.left_edge, node.right_edge
+        insert_grids(node.right, ngreater, greater_gles, greater_gres,
+                     g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_index)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_index)
+    free(greater_gles)
+    free(greater_gres)
+
+
+    return 0
+
+cdef geo_split(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int grid_id, 
+               int rank, 
+               int size):
+    cdef int big_dim = 0
+    cdef int i
+    cdef np.float64_t v, my_max = 0.0
+    
+    for i in range(3):
+        v = gre[i] - gle[i]
+        if v > my_max:
+            my_max = v
+            big_dim = i
+
+    new_pos = (gre[big_dim] + gle[big_dim])/2.
+    
+    lnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+
+    for j in range(3):
+        lnew_gle[j] = gle[j]
+        lnew_gre[j] = gre[j]
+        rnew_gle[j] = gle[j]
+        rnew_gre[j] = gre[j]
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = big_dim 
+    split.pos = new_pos
+
+    # Create a Split
+    divide(node, split)
+
+    #lnew_gre[big_dim] = new_pos
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grid(node.left, lnew_gle, lnew_gre, 
+            grid_id, rank, size)
+
+    #rnew_gle[big_dim] = new_pos 
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grid(node.right, rnew_gle, rnew_gre,
+            grid_id, rank, size)
+    return
+
+cdef void divide(Node node, Split * split):
+    # Attach *split* to *node* and create its two children, partitioning
+    # the node's bounding box at split.pos along split.dim.
+    # Create a Split
+    node.split = split
+    
+    # Scratch edge arrays; start from this node's bounding box.
+    cdef np.ndarray[np.float64_t, ndim=1] le = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] re = np.zeros(3, dtype='float64')
+
+    cdef int i
+    for i in range(3):
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+    # Left child spans [left_edge, split.pos] along the split dimension.
+    re[split.dim] = split.pos
+
+    node.left = Node(node, None, None,
+                     le, re, node.grid,
+                     _lchild_id(node.node_id))
+
+    # Reuse the same le/re buffers for the right child, which spans
+    # [split.pos, right_edge] along the split dimension.
+    # NOTE(review): this assumes Node's constructor copies le/re into its
+    # own storage -- if it kept references, mutating them here would
+    # corrupt node.left's edges. Confirm against Node.__cinit__.
+    re[split.dim] = node.right_edge[split.dim]
+    le[split.dim] = split.pos
+    node.right = Node(node, None, None,
+                      le, re, node.grid,
+                      _rchild_id(node.node_id))
+
+    return
+# 
+def kd_sum_volume(Node node):
+    """Return the total volume of grid-bearing leaves under *node*."""
+    cdef np.float64_t vol = 1.0
+    cdef int i
+    # Interior node: sum the two subtrees.
+    if node.left is not None or node.right is not None:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+    # Leaf without a grid contributes no volume.
+    if node.grid == -1:
+        return 0.0
+    for i in range(3):
+        vol *= node.right_edge[i] - node.left_edge[i]
+    return vol
+
+def kd_node_check(Node node):
+    """Sanity-walk the kd-tree; return the grid-covered volume under *node*."""
+    # Invariant: a node has either both children or neither.
+    assert (node.left is None) == (node.right is None)
+    if node.left is not None:
+        # Interior node: check and sum both subtrees.
+        return kd_node_check(node.left) + kd_node_check(node.right)
+    if node.grid == -1:
+        return 0.0
+    return np.prod(node.right_edge - node.left_edge)
+
+def kd_is_leaf(Node node):
+    """Return truthy (1) if *node* has no children, i.e. it is a leaf."""
+    # Use identity comparison with None; '==' could invoke rich comparison.
+    # (The original names has_l_child/has_r_child were inverted: these
+    # flags are true when the child is ABSENT.)
+    cdef int no_l_child = node.left is None
+    cdef int no_r_child = node.right is None
+    # Invariant: a node has either both children or neither.
+    assert no_l_child == no_r_child
+    return no_l_child
+
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+
+    Given the node we are at (*current*) and the node we came from
+    (*previous*), returns the (current, previous) pair for the next step.
+    Children are visited left before right; returning current=None means
+    the traversal has stepped above the root, i.e. it is finished.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            # No children at all (shouldn't happen for a non-leaf given
+            # the both-or-neither invariant); retreat to the parent.
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+ 
+def depth_traverse(Node trunk, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+
+    If *max_node* is given, any node whose node_id reaches that bound is
+    abandoned: the walk jumps back to its parent as if the right subtree
+    had just been completed.
+    '''
+    if max_node is None:
+        max_node = np.inf
+    current, previous = trunk, None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None:
+            break
+        if current.node_id >= max_node:
+            # Pretend we just finished this node's right subtree so the
+            # next step_depth moves back up.
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(Node tree, max_node=None):
+    '''
+    Yields each node the first time the depth-first walk reaches it
+    (left child before right).
+
+    Unlike depth_traverse, a node is not re-yielded when the walk moves
+    back up into it from one of its children.
+    '''
+    if max_node is None:
+        max_node = np.inf
+    current, previous = tree, None
+    while current is not None:
+        # Yield only on the way down (or at the very start), not when
+        # returning upward from a child.
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None:
+            break
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(Node tree):
+    '''
+    Yields a traversal of the kd tree always going to
+    the left child before the right.
+
+    NOTE(review): despite its name, this uses step_depth and therefore
+    performs a depth-first -- not breadth-first -- traversal; it is
+    equivalent to depth_traverse with no max_node bound.
+    '''
+    current = tree
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(Node tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint; at each split the child on
+    the far side of the split plane is visited first (see step_viewpoint).
+    '''
+    current, previous = tree, None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(Node current, 
+                   Node previous, 
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+
+    Returns the (current, previous) pair for the next step; current=None
+    signals the traversal is complete.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        # Visit the child on the far side of the split plane first.
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                # NOTE(review): sets previous to None (current.right is
+                # None here) -- looks unreachable for a non-leaf given the
+                # both-children invariant asserted in kd_is_leaf; confirm.
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                # NOTE(review): same apparent dead branch as above.
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        # Right child was the far side; now take the near (left) side.
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        # Left child was the far side; now take the near (right) side.
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+cdef int point_in_node(Node node, 
+                       np.ndarray[np.float64_t, ndim=1] point):
+    # Return 1 if *point* lies within [left_edge, right_edge) on every
+    # axis of *node*, 0 otherwise.
+    cdef int axis
+    for axis in range(3):
+        # Negations of the original comparisons (not simply flipped
+        # operators) so NaN coordinates still yield 0.
+        if not (node.left_edge[axis] <= point[axis]):
+            return 0
+        if not (node.right_edge[axis] > point[axis]):
+            return 0
+    return 1
+
+
+def find_node(Node node,
+              np.ndarray[np.float64_t, ndim=1] point):
+    """
+    Find the AMRKDTree node enclosing a position.
+
+    Walks down from *node* to the leaf containing *point*; the point must
+    lie inside *node*'s bounding box.
+    """
+    assert(point_in_node(node, point))
+    # Points strictly left of the split plane descend left; otherwise right.
+    while not kd_is_leaf(node):
+        node = node.left if point[node.split.dim] < node.split.pos else node.right
+    return node
+
+

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/4da03e5f00b6/
Changeset:   4da03e5f00b6
Branch:      stable
User:        MatthewTurk
Date:        2013-07-02 15:58:39
Summary:     Updating setup.py to 2.5.4 and changing description.
Affected #:  1 file

diff -r fcce833dd247341eaa4a89d5e36a00ff26819f45 -r 4da03e5f00b68c3a52107ff75ce48b09360b30c2 setup.py
--- a/setup.py
+++ b/setup.py
@@ -155,7 +155,7 @@
 
 import setuptools
 
-VERSION = "2.5.3"
+VERSION = "2.5.4"
 
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
@@ -242,8 +242,7 @@
         name="yt",
         version=VERSION,
         description="An analysis and visualization toolkit for Astrophysical "
-                    + "simulations, focusing on Adaptive Mesh Refinement data "
-                      "from Enzo, Orion, FLASH, and others.",
+                    + "simulations.",
         classifiers=["Development Status :: 5 - Production/Stable",
                      "Environment :: Console",
                      "Intended Audience :: Science/Research",


https://bitbucket.org/yt_analysis/yt/commits/cde0a641f1bf/
Changeset:   cde0a641f1bf
Branch:      stable
User:        MatthewTurk
Date:        2013-07-02 15:58:43
Summary:     Added tag yt-2.5.4 for changeset 4da03e5f00b6
Affected #:  1 file

diff -r 4da03e5f00b68c3a52107ff75ce48b09360b30c2 -r cde0a641f1bf276b5dcd502d3fef030fb9172db1 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5166,3 +5166,4 @@
 34a5e6774ceb26896c9d767563951d185a720774 yt-2.5.1
 2197c101413723de13e1d0dea153b182342ff719 yt-2.5.2
 59aa6445b5f4a26ecb2449f913c7f2b5fee04bee yt-2.5.3
+4da03e5f00b68c3a52107ff75ce48b09360b30c2 yt-2.5.4

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list