[yt-svn] commit/yt-3.0: 81 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Fri Jul 12 09:15:16 PDT 2013


81 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/f42298968525/
Changeset:   f42298968525
Branch:      yt
User:        ngoldbaum
Date:        2013-06-06 07:58:21
Summary:     Making the test directories subpackages.
Affected #:  6 files

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,7 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -56,6 +56,7 @@
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
+    config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r f4229896852549f3d284d33e42d46175ab4ef8e4 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])


https://bitbucket.org/yt_analysis/yt-3.0/commits/bb30e1a5937e/
Changeset:   bb30e1a5937e
Branch:      yt
User:        ngoldbaum
Date:        2013-06-06 09:15:01
Summary:     Adding run_nose convenience function.

This makes it possible to run nosetests from inside a running Python instance:

>>> import yt
>>> yt.run_nose()
................ ... followed by 2000 more periods.
Affected #:  1 file

diff -r f4229896852549f3d284d33e42d46175ab4ef8e4 -r bb30e1a5937e926d19bf25247556335f2b1aa84b yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress
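
For reference, the keyword arguments map directly onto nose command-line flags
(per the diff above); a hypothetical session:

>>> import yt
>>> yt.run_nose(verbose=True)                # adds '-v'
>>> yt.run_nose(run_answer_tests=True)       # adds '--with-answer-testing'
>>> yt.run_nose(run_answer_tests=True,
...             answer_big_data=True)        # additionally '--answer-big-data'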


https://bitbucket.org/yt_analysis/yt-3.0/commits/bc3f6a77ac2a/
Changeset:   bc3f6a77ac2a
Branch:      yt
User:        ngoldbaum
Date:        2013-06-09 00:34:01
Summary:     Fixing the callback docstrings.
Affected #:  1 file

diff -r bb30e1a5937e926d19bf25247556335f2b1aa84b -r bc3f6a77ac2a5b23092c9a7aa771ac5438635b33 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -717,7 +717,7 @@
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
             callback = invalidate_plot(apply_callback(CallbackMaker))
-            callback.__doc__ = CallbackMaker.__init__.__doc__
+            callback.__doc__ = CallbackMaker.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
     @invalidate_plot
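
The one-line change matters because help() and the generated docs read the
wrapper's __doc__. A minimal standalone sketch of the pattern (names here are
illustrative, not yt's actual registry code):

def wrap_callback(cls):
    # Expose a callback class as an annotate_* method, carrying over the
    # class-level docstring; previously __init__.__doc__ was copied, which
    # is empty once the docstrings live on the class itself.
    def annotate(self, *args, **kwargs):
        return cls(*args, **kwargs)
    annotate.__doc__ = cls.__doc__
    return annotate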


https://bitbucket.org/yt_analysis/yt-3.0/commits/c4ac1f45dff0/
Changeset:   c4ac1f45dff0
Branch:      yt
User:        ngoldbaum
Date:        2013-06-12 23:26:05
Summary:     Correcting a typo in the dynamic scheduler.
Affected #:  1 file

diff -r bc3f6a77ac2a5b23092c9a7aa771ac5438635b33 -r c4ac1f45dff060da1de9df992eee835cdf9169ce yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -133,14 +133,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))
@@ -161,14 +161,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))
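
The misspelling was more than cosmetic: Python has no RunTimeError builtin, so
the old lines raised NameError rather than the intended exception. A quick
check (Python 2, as used here):

>>> import __builtin__
>>> hasattr(__builtin__, 'RuntimeError')
True
>>> hasattr(__builtin__, 'RunTimeError')
False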


https://bitbucket.org/yt_analysis/yt-3.0/commits/fde7e81f6a77/
Changeset:   fde7e81f6a77
Branch:      yt
User:        samskillman
Date:        2013-06-10 22:57:55
Summary:     Adding initial support for drawing r, g, b vectors on the image, denoting the x, y, z data coordinate vectors.
Affected #:  2 files

diff -r bc3f6a77ac2a5b23092c9a7aa771ac5438635b33 -r fde7e81f6a7766f62117456ffad0a077f4c8cbd4 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -130,7 +130,7 @@
     cdef int i, j
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
-    cdef int has_alpha = (image.shape[-1] == 4)
+    cdef int has_alpha = (image.shape[2] == 4)
     for j in range(0, nl, 2):
         # From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
         x0 = xs[j]; y0 = ys[j]; x1 = xs[j+1]; y1 = ys[j+1]
@@ -160,7 +160,7 @@
             if (x0 >=0 and x0 < nx and y0 >= 0 and y0 < ny):
                 if has_alpha:
                     for i in range(4):
-                        image[x0,y0,i] = (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+                        image[x0,y0,i] = (1.-alpha[3])*image[x0,y0,i] + alpha[i]
                 else:
                     for i in range(3):
                         image[x0,y0,i] = (1.-alpha[i])*image[x0,y0,i] + alpha[i]

diff -r bc3f6a77ac2a5b23092c9a7aa771ac5438635b33 -r fde7e81f6a7766f62117456ffad0a077f4c8cbd4 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -342,6 +342,32 @@
         lines(nim, px, py, colors, 24)
         return nim
 
+    def draw_coordinate_vectors(self, im):
+        center = self.origin
+        width = self.width
+        length = 0.05*self.resolution[0]
+        # Put the starting point in the lower left
+        px0 = int(0.05 * self.resolution[0])
+        py0 = int(0.95 * self.resolution[1])
+
+        alpha = im[:,:,3].max()
+        if alpha == 0.0:
+            alpha = 1.0
+
+        coord_vectors = [np.array([length, 0.0, 0.0]),
+                         np.array([0.0, length, 0.0]),
+                         np.array([0.0, 0.0, length])]
+        colors = [np.array([1.0, 0.0, 0.0, alpha]),
+                  np.array([0.0, 1.0, 0.0, alpha]),
+                  np.array([0.0, 0.0, 1.0, alpha])]
+
+        for vec, color in zip(coord_vectors, colors):
+            dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
+            dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
+            print px0, py0, dx, dy, color
+            lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]), 
+                  np.array([color, color]))
+
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/c6fb84547b7a/
Changeset:   c6fb84547b7a
Branch:      yt
User:        samskillman
Date:        2013-06-13 01:18:57
Summary:     Adding ability to draw thick lines.
Affected #:  2 files

diff -r fde7e81f6a7766f62117456ffad0a077f4c8cbd4 -r c6fb84547b7af13bb6dafa1c82b77e5d4858c262 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -121,7 +121,8 @@
           np.ndarray[np.int64_t, ndim=1] xs,
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
-          int points_per_color=1):
+          int points_per_color=1,
+          int thick=1):
 
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
@@ -153,17 +154,21 @@
         else:
             sy = -1
         while(1):
-            if (x0 < 0 and sx == -1): break
-            elif (x0 >= nx and sx == 1): break
-            elif (y0 < 0 and sy == -1): break
-            elif (y0 >= nx and sy == 1): break
-            if (x0 >=0 and x0 < nx and y0 >= 0 and y0 < ny):
+            if (x0 < thick and sx == -1): break
+            elif (x0 >= nx-thick+1 and sx == 1): break
+            elif (y0 < thick and sy == -1): break
+            elif (y0 >= ny-thick+1 and sy == 1): break
+            if (x0 >=thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick):
                 if has_alpha:
                     for i in range(4):
-                        image[x0,y0,i] = (1.-alpha[3])*image[x0,y0,i] + alpha[i]
+                        image[x0-thick/2:x0+(1+thick)/2, 
+                              y0-thick/2:y0+(1+thick)/2,i] = \
+                                (1.-alpha[3])*image[x0,y0,i] + alpha[i]
                 else:
                     for i in range(3):
-                        image[x0,y0,i] = (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+                        image[x0-thick/2:x0+(1+thick)/2, 
+                              y0-thick/2:y0+(1+thick)/2,i] = \
+                                (1.-alpha[i])*image[x0,y0,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break

diff -r fde7e81f6a7766f62117456ffad0a077f4c8cbd4 -r c6fb84547b7af13bb6dafa1c82b77e5d4858c262 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -342,21 +342,20 @@
         lines(nim, px, py, colors, 24)
         return nim
 
-    def draw_coordinate_vectors(self, im):
-        center = self.origin
-        width = self.width
-        length = 0.05*self.resolution[0]
+    def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
+        length_pixels = length * self.resolution[0]
         # Put the starting point in the lower left
-        px0 = int(0.05 * self.resolution[0])
-        py0 = int(0.95 * self.resolution[1])
+        px0 = int(length * self.resolution[0])
+        # CS coordinates!
+        py0 = int((1.0-length) * self.resolution[1])
 
-        alpha = im[:,:,3].max()
+        alpha = im[:, :, 3].max()
         if alpha == 0.0:
             alpha = 1.0
 
-        coord_vectors = [np.array([length, 0.0, 0.0]),
-                         np.array([0.0, length, 0.0]),
-                         np.array([0.0, 0.0, length])]
+        coord_vectors = [np.array([length_pixels, 0.0, 0.0]),
+                         np.array([0.0, length_pixels, 0.0]),
+                         np.array([0.0, 0.0, length_pixels])]
         colors = [np.array([1.0, 0.0, 0.0, alpha]),
                   np.array([0.0, 1.0, 0.0, alpha]),
                   np.array([0.0, 0.0, 1.0, alpha])]
@@ -366,7 +365,7 @@
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             print px0, py0, dx, dy, color
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]), 
-                  np.array([color, color]))
+                  np.array([color, color]), 1, thickness)
 
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.


https://bitbucket.org/yt_analysis/yt-3.0/commits/cf52f60523b1/
Changeset:   cf52f60523b1
Branch:      yt
User:        samskillman
Date:        2013-06-13 20:27:49
Summary:     Adding docstrings to coordinate vector drawing.
Affected #:  1 file

diff -r c6fb84547b7af13bb6dafa1c82b77e5d4858c262 -r cf52f60523b1826cfcaca7fae21c330d14a5bb81 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -343,6 +343,38 @@
         return nim
 
     def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
+        r"""Draws three coordinate vectors in the corner of a rendering.
+
+        Modifies an existing image to have three lines corresponding to the
+        coordinate directions colored by {x,y,z} = {r,g,b}.  Currently only
+        functional for plane-parallel volume rendering.
+
+        Parameters
+        ----------
+        im: Numpy ndarray
+            Existing image that has the same resolution as the Camera,
+            which will be painted by grid lines.
+        length: float, optional
+            The length of the lines, as a fraction of the image size.
+            Default : 0.05
+        thickness : int, optional
+            Thickness in pixels of the line to be drawn.
+
+        Returns
+        -------
+        None
+
+        Modifies
+        --------
+        im: The original image.
+
+        Examples
+        --------
+        >>> im = cam.snapshot()
+>>> cam.draw_coordinate_vectors(im)
+        >>> im.write_png('render_with_grids.png')
+
+        """
         length_pixels = length * self.resolution[0]
         # Put the starting point in the lower left
         px0 = int(length * self.resolution[0])
@@ -364,7 +396,7 @@
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
             print px0, py0, dx, dy, color
-            lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]), 
+            lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
                   np.array([color, color]), 1, thickness)
 
     def draw_line(self, im, x0, x1, color=None):


https://bitbucket.org/yt_analysis/yt-3.0/commits/636dc8ccfad0/
Changeset:   636dc8ccfad0
Branch:      yt
User:        samskillman
Date:        2013-06-13 20:47:40
Summary:     Removing errant print.
Affected #:  1 file

diff -r cf52f60523b1826cfcaca7fae21c330d14a5bb81 -r 636dc8ccfad05fb870397ee46d42214dd604d89c yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -395,7 +395,6 @@
         for vec, color in zip(coord_vectors, colors):
             dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
             dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
-            print px0, py0, dx, dy, color
             lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
                   np.array([color, color]), 1, thickness)
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/252591eeb6a2/
Changeset:   252591eeb6a2
Branch:      yt
User:        ngoldbaum
Date:        2013-06-14 05:26:37
Summary:     Merged in samskillman/yt (pull request #530)

Draw coordinate vectors on renders
Affected #:  2 files

diff -r c4ac1f45dff060da1de9df992eee835cdf9169ce -r 252591eeb6a2c0014ddb3a87adb04acf7eeefa97 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -121,7 +121,8 @@
           np.ndarray[np.int64_t, ndim=1] xs,
           np.ndarray[np.int64_t, ndim=1] ys,
           np.ndarray[np.float64_t, ndim=2] colors,
-          int points_per_color=1):
+          int points_per_color=1,
+          int thick=1):
 
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
@@ -130,7 +131,7 @@
     cdef int i, j
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
-    cdef int has_alpha = (image.shape[-1] == 4)
+    cdef int has_alpha = (image.shape[2] == 4)
     for j in range(0, nl, 2):
         # From wikipedia http://en.wikipedia.org/wiki/Bresenham's_line_algorithm
         x0 = xs[j]; y0 = ys[j]; x1 = xs[j+1]; y1 = ys[j+1]
@@ -153,17 +154,21 @@
         else:
             sy = -1
         while(1):
-            if (x0 < 0 and sx == -1): break
-            elif (x0 >= nx and sx == 1): break
-            elif (y0 < 0 and sy == -1): break
-            elif (y0 >= nx and sy == 1): break
-            if (x0 >=0 and x0 < nx and y0 >= 0 and y0 < ny):
+            if (x0 < thick and sx == -1): break
+            elif (x0 >= nx-thick+1 and sx == 1): break
+            elif (y0 < thick and sy == -1): break
+            elif (y0 >= ny-thick+1 and sy == 1): break
+            if (x0 >=thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick):
                 if has_alpha:
                     for i in range(4):
-                        image[x0,y0,i] = (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+                        image[x0-thick/2:x0+(1+thick)/2, 
+                              y0-thick/2:y0+(1+thick)/2,i] = \
+                                (1.-alpha[3])*image[x0,y0,i] + alpha[i]
                 else:
                     for i in range(3):
-                        image[x0,y0,i] = (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+                        image[x0-thick/2:x0+(1+thick)/2, 
+                              y0-thick/2:y0+(1+thick)/2,i] = \
+                                (1.-alpha[i])*image[x0,y0,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break

diff -r c4ac1f45dff060da1de9df992eee835cdf9169ce -r 252591eeb6a2c0014ddb3a87adb04acf7eeefa97 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -342,6 +342,62 @@
         lines(nim, px, py, colors, 24)
         return nim
 
+    def draw_coordinate_vectors(self, im, length=0.05, thickness=1):
+        r"""Draws three coordinate vectors in the corner of a rendering.
+
+        Modifies an existing image to have three lines corresponding to the
+        coordinate directions colored by {x,y,z} = {r,g,b}.  Currently only
+        functional for plane-parallel volume rendering.
+
+        Parameters
+        ----------
+        im: Numpy ndarray
+            Existing image that has the same resolution as the Camera,
+            which will be painted by grid lines.
+        length: float, optional
+            The length of the lines, as a fraction of the image size.
+            Default : 0.05
+        thickness : int, optional
+            Thickness in pixels of the line to be drawn.
+
+        Returns
+        -------
+        None
+
+        Modifies
+        --------
+        im: The original image.
+
+        Examples
+        --------
+        >>> im = cam.snapshot()
+>>> cam.draw_coordinate_vectors(im)
+        >>> im.write_png('render_with_grids.png')
+
+        """
+        length_pixels = length * self.resolution[0]
+        # Put the starting point in the lower left
+        px0 = int(length * self.resolution[0])
+        # CS coordinates!
+        py0 = int((1.0-length) * self.resolution[1])
+
+        alpha = im[:, :, 3].max()
+        if alpha == 0.0:
+            alpha = 1.0
+
+        coord_vectors = [np.array([length_pixels, 0.0, 0.0]),
+                         np.array([0.0, length_pixels, 0.0]),
+                         np.array([0.0, 0.0, length_pixels])]
+        colors = [np.array([1.0, 0.0, 0.0, alpha]),
+                  np.array([0.0, 1.0, 0.0, alpha]),
+                  np.array([0.0, 0.0, 1.0, alpha])]
+
+        for vec, color in zip(coord_vectors, colors):
+            dx = int(np.dot(vec, self.orienter.unit_vectors[0]))
+            dy = int(np.dot(vec, self.orienter.unit_vectors[1]))
+            lines(im, np.array([px0, px0+dx]), np.array([py0, py0+dy]),
+                  np.array([color, color]), 1, thickness)
+
     def draw_line(self, im, x0, x1, color=None):
         r"""Draws a line on an existing volume rendering.
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/6a553070a5c2/
Changeset:   6a553070a5c2
Branch:      yt
User:        Andrew Myers
Date:        2013-06-13 20:39:37
Summary:     Patching the RadMC3D exporter to work even if pf.h.grids are not already ordered by level.
Affected #:  1 file

diff -r 7d765411da341fa45519ecb3ab788d3701246eb7 -r 6a553070a5c20363b88148de7ee20bbe0d796fed yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
         self.layers.append(base_layer)
         self.cell_count += np.product(pf.domain_dimensions)
 
-        for grid in pf.h.grids:
+        sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+        for grid in sorted_grids:
             if grid.Level <= self.max_level:
                 self._add_grid_to_layers(grid)
 
@@ -232,11 +233,11 @@
             if p == 0:
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
-                LE = np.zeros(3)
+                parent_LE = np.zeros(3)
                 for potential_parent in self.layers:
                     if potential_parent.id == p:
-                        LE = potential_parent.LeftEdge
-                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
             ix  = int(ind[0]+0.5)
             iy  = int(ind[1]+0.5)
             iz  = int(ind[2]+0.5)
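
The key change is the sort: layers must be laid down coarsest-first, and
pf.h.grids carries no ordering guarantee. A toy illustration (Grid here is a
stand-in, not yt's grid class):

class Grid(object):
    def __init__(self, gid, level):
        self.id, self.Level = gid, level

grids = [Grid(3, 2), Grid(1, 0), Grid(2, 1)]       # arbitrary on-disk order
ordered = sorted(grids, key=lambda g: g.Level)
assert [g.Level for g in ordered] == [0, 1, 2]     # parents before children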


https://bitbucket.org/yt_analysis/yt-3.0/commits/3ab6b834fa3a/
Changeset:   3ab6b834fa3a
Branch:      yt
User:        atmyers
Date:        2013-06-13 20:40:24
Summary:     Merged yt_analysis/yt into yt
Affected #:  9 files

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,7 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -133,14 +133,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))
@@ -161,14 +161,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -56,6 +56,7 @@
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
+    config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -717,7 +717,7 @@
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
             callback = invalidate_plot(apply_callback(CallbackMaker))
-            callback.__doc__ = CallbackMaker.__init__.__doc__
+            callback.__doc__ = CallbackMaker.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
     @invalidate_plot

diff -r 6a553070a5c20363b88148de7ee20bbe0d796fed -r 3ab6b834fa3af6f84fbee8e3bed1db517f0a2c62 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])


https://bitbucket.org/yt_analysis/yt-3.0/commits/c23cd3cde4a4/
Changeset:   c23cd3cde4a4
Branch:      yt
User:        MatthewTurk
Date:        2013-06-14 13:49:26
Summary:     Merged in atmyers/yt-clean (pull request #529)

Patching the RadMC3D export code
Affected #:  1 file

diff -r 252591eeb6a2c0014ddb3a87adb04acf7eeefa97 -r c23cd3cde4a41d7eae1e0ca657729acfc1c947d1 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
         self.layers.append(base_layer)
         self.cell_count += np.product(pf.domain_dimensions)
 
-        for grid in pf.h.grids:
+        sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+        for grid in sorted_grids:
             if grid.Level <= self.max_level:
                 self._add_grid_to_layers(grid)
 
@@ -232,11 +233,11 @@
             if p == 0:
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
-                LE = np.zeros(3)
+                parent_LE = np.zeros(3)
                 for potential_parent in self.layers:
                     if potential_parent.id == p:
-                        LE = potential_parent.LeftEdge
-                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
             ix  = int(ind[0]+0.5)
             iy  = int(ind[1]+0.5)
             iz  = int(ind[2]+0.5)


https://bitbucket.org/yt_analysis/yt-3.0/commits/045ea2c21426/
Changeset:   045ea2c21426
Branch:      yt
User:        David Collins
Date:        2013-06-17 20:46:44
Summary:     Adding _type_name and _con_args to InLineExtractedRegionBase.
Affected #:  1 file

diff -r c23cd3cde4a41d7eae1e0ca657729acfc1c947d1 -r 045ea2c214262c4b6d024a5f88b5a94e838d60c6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3121,6 +3121,8 @@
     In-line extracted regions accept a base region and a set of field_cuts to
     determine which points in a grid should be included.
     """
+    _type_name = "cut_region"
+    _con_args = ("_base_region", "_field_cuts")
     def __init__(self, base_region, field_cuts, **kwargs):
         cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
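
For context: _type_name is what registers a container with the hierarchy (so
it can be constructed as pf.h.cut_region(...)), and _con_args names the
attributes needed to rebuild the object, for instance when it is shipped to
another processor. A toy sketch of the reconstruction side (the helper is
hypothetical, not yt's actual code):

def reconstruct(cls, obj):
    # Rebuild an instance from the attributes listed in _con_args; for the
    # class above that is cls(obj._base_region, obj._field_cuts).
    return cls(*[getattr(obj, arg) for arg in obj._con_args])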


https://bitbucket.org/yt_analysis/yt-3.0/commits/f16ce48c7650/
Changeset:   f16ce48c7650
Branch:      yt
User:        chummels
Date:        2013-06-18 11:41:03
Summary:     Adding a tracer particle field to the Enzo frontend.  One can access it as "tracer_density".
Affected #:  1 file

diff -r 31ee3fa3dbd2fff189ad740ba726a91979afa16d -r f16ce48c76509dd1141edda954cdf4a1e878e131 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -77,9 +77,13 @@
 
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
+
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
+def _ConvertNone(data):
+    return 1.0*mh
+
 for species in _speciesList:
     add_field("%s_Fraction" % species,
              function=_SpeciesFraction,
@@ -365,9 +369,29 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("star_density", function=_spdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
+def _tpdensity(field, data):
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
+    if data["particle_position_x"].size == 0: return blank
+    filter = data['particle_type'] == 3
+    if not filter.any(): return blank
+    data["particle_mass"][filter] = 1.0
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           data["particle_mass"][filter],
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    return blank
+
+add_field("tracer_density", function=_tpdensity,
+          validators=[ValidateSpatial(0)], convert_function=_ConvertNone)
+
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank


https://bitbucket.org/yt_analysis/yt-3.0/commits/4736ea0c26cd/
Changeset:   4736ea0c26cd
Branch:      yt
User:        chummels
Date:        2013-06-18 11:51:17
Summary:     Fixing a bug with annotate_contour, wherein it didn't recognize one of the designated keywords: take_log.
Affected #:  1 file

diff -r f16ce48c76509dd1141edda954cdf4a1e878e131 -r 4736ea0c26cd5495867577322b0e8e397a92b711 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -229,7 +229,8 @@
     """
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None, label = False, label_args = None):
+                 plot_args = None, label = False, take_log = False, 
+                 label_args = None):
         PlotCallback.__init__(self)
         self.ncont = ncont
         self.field = field
@@ -237,6 +238,7 @@
         from yt.utilities.delaunay.triangulate import Triangulation as triang
         self.triang = triang
         self.clim = clim
+        self.take_log = take_log
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
         self.label = label
@@ -303,9 +305,9 @@
         elif plot._type_name == 'OffAxisProjection':
             zi = plot.frb[self.field][::self.factor,::self.factor].transpose()
         
-        if plot.pf.field_info[self.field].take_log: zi=np.log10(zi)
+        if self.take_log: zi=np.log10(zi)
 
-        if plot.pf.field_info[self.field].take_log and self.clim is not None: 
+        if self.take_log and self.clim is not None: 
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 


https://bitbucket.org/yt_analysis/yt-3.0/commits/622105751569/
Changeset:   622105751569
Branch:      yt
User:        chummels
Date:        2013-06-18 19:41:49
Summary:     Adding the ability to specify a bulk velocity to the annotate_velocity callback.
Affected #:  1 file

diff -r 4736ea0c26cd5495867577322b0e8e397a92b711 -r 62210575156928671c7e229af438fcdbf5340e14 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -129,7 +129,16 @@
         else:
             xv = "%s-velocity" % (x_names[plot.data.axis])
             yv = "%s-velocity" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units, normalize=self.normalize)
+
+            bv = plot.data.get_field_parameter("bulk_velocity")
+            if bv is not None:
+                bv_x = bv[x_dict[plot.data.axis]]
+                bv_y = bv[y_dict[plot.data.axis]]
+            else: bv_x = bv_y = 0
+
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, 
+                                 scale_units=self.scale_units, 
+                                 normalize=self.normalize, bv_x=bv_x, bv_y=bv_y)
         return qcb(plot)
 
 class MagFieldCallback(PlotCallback):
@@ -174,11 +183,12 @@
     (see matplotlib.axes.Axes.quiver for more info)
     """
     _type_name = "quiver"
-    def __init__(self, field_x, field_y, factor=16, scale=None, scale_units=None, normalize=False):
+    def __init__(self, field_x, field_y, factor=16, scale=None, scale_units=None, normalize=False, bv_x=0, bv_y=0):
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.bv_x = self.bv_y = 0
+        self.bv_x = bv_x
+        self.bv_y = bv_y
         self.factor = factor
         self.scale = scale
         self.scale_units = scale_units
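
The point of threading bulk_velocity through to the quiver is frame
subtraction: the arrows show v - v_bulk, so a uniformly translating medium
plots as zero flow. A doctest-style illustration:

>>> import numpy as np
>>> vx = 100.0 * np.ones((2, 2))    # uniform flow along x
>>> bv_x = 100.0
>>> (vx - bv_x).max()
0.0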


https://bitbucket.org/yt_analysis/yt-3.0/commits/cabdb02d4ca3/
Changeset:   cabdb02d4ca3
Branch:      yt
User:        chummels
Date:        2013-06-18 22:49:45
Summary:     Making modifications to the tracer particle field, as suggested by mjturk and ngoldbaum.
Affected #:  1 file

diff -r 62210575156928671c7e229af438fcdbf5340e14 -r cabdb02d4ca339d4027cce312970c58a799a9813 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -81,9 +81,6 @@
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
-def _ConvertNone(data):
-    return 1.0*mh
-
 for species in _speciesList:
     add_field("%s_Fraction" % species,
              function=_SpeciesFraction,
@@ -319,6 +316,10 @@
 
 def _convertDensity(data):
     return data.convert("Density")
+
+def _convertCmToKpc(data):
+    return (3.08567758e21)**3
+
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
         ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
@@ -371,26 +372,33 @@
     return blank
 
 add_field("star_density", function=_spdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Stellar\/Density")
 
-def _tpdensity(field, data):
+def _tpdensity(field, data): 
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
     if data["particle_position_x"].size == 0: return blank
-    filter = data['particle_type'] == 3
+    filter = data['particle_type'] == 3 # tracer particles
     if not filter.any(): return blank
-    data["particle_mass"][filter] = 1.0
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
+                           np.ones(filter.sum(), dtype="float64"),
                            np.int64(np.where(filter)[0].size),
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
+    blank /= data['CellVolume']
     return blank
 
-add_field("tracer_density", function=_tpdensity,
-          validators=[ValidateSpatial(0)], convert_function=_ConvertNone)
+add_field("tracer_number_density", function=_tpdensity,
+          validators=[ValidateSpatial(0)], convert_function=_convertCmToKpc,
+          units = r"\rm{particles}/\rm{kpc}^3",
+          projected_units = r"\rm{particles}/\rm{kpc}^2",
+          display_name = "Tracer\/Particle\/Number\/Density",
+          projection_conversion='kpc')
 
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
@@ -411,8 +419,12 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("dm_density", function=_dmpdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Dark\/Matter\/Density")
 
 def _cic_particle_field(field, data):
     """


https://bitbucket.org/yt_analysis/yt-3.0/commits/80cee8e5d3b1/
Changeset:   80cee8e5d3b1
Branch:      yt
User:        chummels
Date:        2013-06-18 22:59:36
Summary:     Modifying annotate_contour so that take_log does not default to False.
Affected #:  1 file

diff -r cabdb02d4ca339d4027cce312970c58a799a9813 -r 80cee8e5d3b120fbf86e40ed40219828fc4c639c yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -229,7 +229,7 @@
 
 class ContourCallback(PlotCallback):
     """
-    annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
+    annotate_contour(self, field, ncont=5, factor=4, take_log=None, clim=None,
                      plot_args = None):
 
     Add contours in *field* to the plot.  *ncont* governs the number of
@@ -239,7 +239,7 @@
     """
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None, label = False, take_log = False, 
+                 plot_args = None, label = False, take_log = None, 
                  label_args = None):
         PlotCallback.__init__(self)
         self.ncont = ncont
@@ -315,6 +315,9 @@
         elif plot._type_name == 'OffAxisProjection':
             zi = plot.frb[self.field][::self.factor,::self.factor].transpose()
         
+        if self.take_log is None:
+            self.take_log = plot.pf.field_info[self.field].take_log
+
         if self.take_log: zi=np.log10(zi)
 
         if self.take_log and self.clim is not None: 
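
This is the usual None-as-sentinel pattern: None means "not specified", so the
callback can fall back to the field definition's take_log while still
honouring an explicit True or False. A minimal sketch:

def resolve_take_log(take_log, field_take_log):
    # None = caller said nothing: defer to the field's own setting.
    if take_log is None:
        return field_take_log
    return take_log        # an explicit True/False wins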


https://bitbucket.org/yt_analysis/yt-3.0/commits/a6c00ea18a2b/
Changeset:   a6c00ea18a2b
Branch:      yt
User:        chummels
Date:        2013-06-19 00:03:40
Summary:     Making sure to use yt's defined physical_constants.
Affected #:  1 file

diff -r 80cee8e5d3b120fbf86e40ed40219828fc4c639c -r a6c00ea18a2b86526d08ca7234cb0e988228cee8 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -37,7 +37,8 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh
+    mh, \
+    kpc_per_cm
 from yt.funcs import *
 
 import yt.utilities.lib as amr_utils
@@ -318,7 +319,7 @@
     return data.convert("Density")
 
 def _convertCmToKpc(data):
-    return (3.08567758e21)**3
+    return 1/(kpc_per_cm)**3
 
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
         ["SN_Colour"]:


https://bitbucket.org/yt_analysis/yt-3.0/commits/2c15a177fc68/
Changeset:   2c15a177fc68
Branch:      yt
User:        MatthewTurk
Date:        2013-06-19 15:30:47
Summary:     Merged in chummels/yt (pull request #532)

A few small things: tracer particle field, annotate_velocity, and annotate_contour updates
Affected #:  2 files

diff -r 045ea2c214262c4b6d024a5f88b5a94e838d60c6 -r 2c15a177fc684678b9c09120a6cf3b08ddb19457 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -37,7 +37,8 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
-    mh
+    mh, \
+    kpc_per_cm
 from yt.funcs import *
 
 import yt.utilities.lib as amr_utils
@@ -77,6 +78,7 @@
 
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
+
 def _ConvertNumberDensity(data):
     return 1.0/mh
 
@@ -315,6 +317,10 @@
 
 def _convertDensity(data):
     return data.convert("Density")
+
+def _convertCmToKpc(data):
+    return 1/(kpc_per_cm)**3
+
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ] + \
         ["SN_Colour"]:
     KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
@@ -365,8 +371,35 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("star_density", function=_spdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Stellar\/Density")
+
+def _tpdensity(field, data): 
+    blank = np.zeros(data.ActiveDimensions, dtype='float64')
+    if data["particle_position_x"].size == 0: return blank
+    filter = data['particle_type'] == 3 # tracer particles
+    if not filter.any(): return blank
+    amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
+                           data["particle_position_y"][filter].astype(np.float64),
+                           data["particle_position_z"][filter].astype(np.float64),
+                           np.ones(filter.sum(), dtype="float64"),
+                           np.int64(np.where(filter)[0].size),
+                           blank, np.array(data.LeftEdge).astype(np.float64),
+                           np.array(data.ActiveDimensions).astype(np.int32), 
+                           np.float64(data['dx']))
+    blank /= data['CellVolume']
+    return blank
+
+add_field("tracer_number_density", function=_tpdensity,
+          validators=[ValidateSpatial(0)], convert_function=_convertCmToKpc,
+          units = r"\rm{particles}/\rm{kpc}^3",
+          projected_units = r"\rm{particles}/\rm{kpc}^2",
+          display_name = "Tracer\/Particle\/Number\/Density",
+          projection_conversion='kpc')
 
 def _dmpdensity(field, data):
     blank = np.zeros(data.ActiveDimensions, dtype='float64')
@@ -387,8 +420,12 @@
                            np.array(data.ActiveDimensions).astype(np.int32), 
                            np.float64(data['dx']))
     return blank
+
 add_field("dm_density", function=_dmpdensity,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          units = r"\rm{g}/\rm{cm}^3",
+          projected_units = r"\rm{g}/\rm{cm}^2",
+          display_name = "Dark\/Matter\/Density")
 
 def _cic_particle_field(field, data):
     """

diff -r 045ea2c214262c4b6d024a5f88b5a94e838d60c6 -r 2c15a177fc684678b9c09120a6cf3b08ddb19457 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -129,7 +129,16 @@
         else:
             xv = "%s-velocity" % (x_names[plot.data.axis])
             yv = "%s-velocity" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units, normalize=self.normalize)
+
+            bv = plot.data.get_field_parameter("bulk_velocity")
+            if bv is not None:
+                bv_x = bv[x_dict[plot.data.axis]]
+                bv_y = bv[y_dict[plot.data.axis]]
+            else: bv_x = bv_y = 0
+
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, 
+                                 scale_units=self.scale_units, 
+                                 normalize=self.normalize, bv_x=bv_x, bv_y=bv_y)
         return qcb(plot)
 
 class MagFieldCallback(PlotCallback):
@@ -174,11 +183,12 @@
     (see matplotlib.axes.Axes.quiver for more info)
     """
     _type_name = "quiver"
-    def __init__(self, field_x, field_y, factor=16, scale=None, scale_units=None, normalize=False):
+    def __init__(self, field_x, field_y, factor=16, scale=None, scale_units=None, normalize=False, bv_x=0, bv_y=0):
         PlotCallback.__init__(self)
         self.field_x = field_x
         self.field_y = field_y
-        self.bv_x = self.bv_y = 0
+        self.bv_x = bv_x
+        self.bv_y = bv_y
         self.factor = factor
         self.scale = scale
         self.scale_units = scale_units
@@ -219,7 +229,7 @@
 
 class ContourCallback(PlotCallback):
     """
-    annotate_contour(self, field, ncont=5, factor=4, take_log=False, clim=None,
+    annotate_contour(self, field, ncont=5, factor=4, take_log=None, clim=None,
                      plot_args = None):
 
     Add contours in *field* to the plot.  *ncont* governs the number of
@@ -229,7 +239,8 @@
     """
     _type_name = "contour"
     def __init__(self, field, ncont=5, factor=4, clim=None,
-                 plot_args = None, label = False, label_args = None):
+                 plot_args = None, label = False, take_log = None, 
+                 label_args = None):
         PlotCallback.__init__(self)
         self.ncont = ncont
         self.field = field
@@ -237,6 +248,7 @@
         from yt.utilities.delaunay.triangulate import Triangulation as triang
         self.triang = triang
         self.clim = clim
+        self.take_log = take_log
         if plot_args is None: plot_args = {'colors':'k'}
         self.plot_args = plot_args
         self.label = label
@@ -303,9 +315,12 @@
         elif plot._type_name == 'OffAxisProjection':
             zi = plot.frb[self.field][::self.factor,::self.factor].transpose()
         
-        if plot.pf.field_info[self.field].take_log: zi=np.log10(zi)
+        if self.take_log is None:
+            self.take_log = plot.pf.field_info[self.field].take_log
 
-        if plot.pf.field_info[self.field].take_log and self.clim is not None: 
+        if self.take_log: zi=np.log10(zi)
+
+        if self.take_log and self.clim is not None: 
             self.clim = (np.log10(self.clim[0]), np.log10(self.clim[1]))
         
         if self.clim is not None: 


https://bitbucket.org/yt_analysis/yt-3.0/commits/3a638f82a37e/
Changeset:   3a638f82a37e
Branch:      yt
User:        ngoldbaum
Date:        2013-06-21 21:13:12
Summary:     Subclassing argparse.ArgumentParser to get a better error message.  Closes #596.

See http://stackoverflow.com/questions/4561580/how-to-let-the-parser-print-help-message-rather-than-error-and-exit
Affected #:  1 file

diff -r 2c15a177fc684678b9c09120a6cf3b08ddb19457 -r 3a638f82a37eb6a647ca4cda18446d94a055f221 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -98,7 +98,17 @@
         if param == "loglevel": # special case
             mylog.setLevel(int(val))
 
-parser = argparse.ArgumentParser(description = 'yt command line arguments')
+class YTParser(argparse.ArgumentParser):
+    def error(self, message):
+        """error(message: string)
+
+        Prints a help message that is more detailed than the argparse default
+        and then exits.
+        """
+        self.print_help(sys.stderr)
+        self.exit(2, '%s: error: %s\n' % (self.prog, message))
+
+parser = YTParser(description = 'yt command line arguments')
 parser.add_argument("--config", action=SetConfigOption,
     help = "Set configuration option, in the form param=value")
 parser.add_argument("--paste", action=SetExceptionHandling,


https://bitbucket.org/yt_analysis/yt-3.0/commits/8e8bd5b353df/
Changeset:   8e8bd5b353df
Branch:      yt
User:        atmyers
Date:        2013-06-27 23:52:50
Summary:     Shouldn't this be Radial instead of Poloidal?
Affected #:  1 file

diff -r 3a638f82a37eb6a647ca4cda18446d94a055f221 -r 8e8bd5b353df99c1510058a7c982329e5212fcfe yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1082,7 +1082,7 @@
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 
-add_field("BRadial", function=_BPoloidal,
+add_field("BRadial", function=_BRadial,
           units=r"\rm{Gauss}",
           validators=[ValidateParameter("normal")])
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/985e0f1536d4/
Changeset:   985e0f1536d4
Branch:      yt
User:        xarthisius
Date:        2013-06-22 11:43:35
Summary:     [gdf] read data_software string from gdf files
Affected #:  1 file

diff -r 3a638f82a37eb6a647ca4cda18446d94a055f221 -r 985e0f1536d43881328e47c7918e0ddb762209f4 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -214,6 +214,8 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
+        self.data_software = \
+                self._handle['gridded_data_format'].attrs['data_software']
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]


https://bitbucket.org/yt_analysis/yt-3.0/commits/3eec3d114f2d/
Changeset:   3eec3d114f2d
Branch:      yt
User:        xarthisius
Date:        2013-06-22 11:44:08
Summary:     [gdf] don't reduce dds to 1 for nonexistent dimensions for Piernik, as it breaks 2D data handling
Affected #:  1 file

diff -r 985e0f1536d43881328e47c7918e0ddb762209f4 -r 3eec3d114f2da8bb99d7bc09837049399ea4d9cc yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -74,8 +74,9 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.data_software != "piernik":
+            if self.pf.dimensionality < 2: self.dds[1] = 1.0
+            if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):


https://bitbucket.org/yt_analysis/yt-3.0/commits/3fa604abdc65/
Changeset:   3fa604abdc65
Branch:      yt
User:        xarthisius
Date:        2013-06-22 15:45:27
Summary:     [gdf] provide sane default for 'data_software' since it is an optional attribute. Thanks Matt!
Affected #:  1 file

diff -r 3eec3d114f2da8bb99d7bc09837049399ea4d9cc -r 3fa604abdc651b5bb46bd3e33b971fa9857040cb yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -215,8 +215,11 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
-        self.data_software = \
+        if 'data_software' in self._handle['gridded_data_format'].attrs:
+            self.data_software = \
                 self._handle['gridded_data_format'].attrs['data_software']
+        else:
+            self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
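Since h5py attribute collections implement the mapping protocol, the membership test could equivalently be written with .get(); a compact sketch (hypothetical filename again):

    import h5py

    with h5py.File("data0000.gdf", "r") as handle:
        attrs = handle["gridded_data_format"].attrs
        # Same effect as the 'in' check: fall back when the attribute is absent.
        data_software = attrs.get("data_software", "unknown")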


https://bitbucket.org/yt_analysis/yt-3.0/commits/62e723e2f60c/
Changeset:   62e723e2f60c
Branch:      yt
User:        MatthewTurk
Date:        2013-06-29 23:07:35
Summary:     Merged in xarthisius/yt (pull request #535)

Fix 2D data handling for PIERNIK
Affected #:  1 file

diff -r 8e8bd5b353df99c1510058a7c982329e5212fcfe -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -74,8 +74,9 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.data_software != "piernik":
+            if self.pf.dimensionality < 2: self.dds[1] = 1.0
+            if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
 class GDFHierarchy(AMRHierarchy):
@@ -214,6 +215,11 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
+        if 'data_software' in self._handle['gridded_data_format'].attrs:
+            self.data_software = \
+                self._handle['gridded_data_format'].attrs['data_software']
+        else:
+            self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]


https://bitbucket.org/yt_analysis/yt-3.0/commits/4609f5c48f8a/
Changeset:   4609f5c48f8a
Branch:      yt
User:        samskillman
Date:        2013-06-04 19:43:51
Summary:     Initial go at a Cython amrkdtree. Works, but doesn't show a huge improvement.
Needs cleaning.
Affected #:  5 files

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -26,6 +26,7 @@
 import numpy as np
 from yt.funcs import *
 from yt.utilities.lib import kdtree_get_choices
+from yt.utilities.lib.amr_kdtools import kd_is_leaf
 
 def _lchild_id(node_id): return (node_id<<1)
 def _rchild_id(node_id): return (node_id<<1) + 1
@@ -309,59 +310,6 @@
     else:
         return kd_node_check(node.left)+kd_node_check(node.right)
 
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
 def depth_first_touch(tree, max_node=None):
     '''
     Yields a depth-first traversal of the kd tree always going to
@@ -392,64 +340,64 @@
         current, previous = step_depth(current, previous)
 
 
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
+# def viewpoint_traverse(tree, viewpoint):
+#     '''
+#     Yields a viewpoint dependent traversal of the kd-tree.  Starts
+#     with nodes furthest away from viewpoint.
+#     '''
+# 
+#     current = tree.trunk
+#     previous = None
+#     while current is not None:
+#         yield current
+#         current, previous = step_viewpoint(current, previous, viewpoint)
+# 
+# def step_viewpoint(current, previous, viewpoint):
+#     '''
+#     Takes a single step in the viewpoint based traversal.  Always
+#     goes to the node furthest away from viewpoint first.
+#     '''
+#     if kd_is_leaf(current): # At a leaf, move back up
+#         previous = current
+#         current = current.parent
+#     elif current.split.dim is None: # This is a dead node
+#         previous = current
+#         current = current.parent
+# 
+#     elif current.parent is previous: # Moving down
+#         previous = current
+#         if viewpoint[current.split.dim] <= current.split.pos:
+#             if current.right is not None:
+#                 current = current.right
+#             else:
+#                 previous = current.right
+#         else:
+#             if current.left is not None:
+#                 current = current.left
+#             else:
+#                 previous = current.left
+# 
+#     elif current.right is previous: # Moving up from right 
+#         previous = current
+#         if viewpoint[current.split.dim] <= current.split.pos:
+#             if current.left is not None:
+#                 current = current.left
+#             else:
+#                 current = current.parent
+#         else:
+#             current = current.parent
+# 
+#     elif current.left is previous: # Moving up from left child
+#         previous = current
+#         if viewpoint[current.split.dim] > current.split.pos:
+#             if current.right is not None:
+#                 current = current.right
+#             else:
+#                 current = current.parent
+#         else:
+#             current = current.parent
+# 
+#     return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,12 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
+from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+        depth_first_touch
+from yt.utilities.lib.amr_kdtools import Node, add_grids, add_grid, \
+        kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +69,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +92,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_grids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +101,35 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
 
+            add_grids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -135,7 +141,7 @@
     def sum_cells(self, all_cells=False):
         cells = 0
         for node in depth_traverse(self):
-            if node.grid is None:
+            if node.grid != -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
@@ -204,14 +210,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -269,12 +269,13 @@
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +293,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -427,7 +428,7 @@
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
                 i = node.id
-                if node.grid is not None:
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -40,3 +40,4 @@
 from .marching_cubes import *
 from .GridTree import *
 from .write_array import *
+from .amr_kdtools import *

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/amr_kdtools.pyx
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -0,0 +1,750 @@
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free, abs
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
+from field_interpolation_tables cimport \
+    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
+    FIT_eval_transfer_with_light
+from fixed_interpolator cimport *
+
+from cython.parallel import prange, parallel, threadid
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+
+DEF Nch = 4
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef class Node:
+
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public int node_id
+    cdef np.float64_t * left_edge
+    cdef np.float64_t * right_edge
+    cdef public data
+    cdef Split * split
+
+    def __cinit__(self, 
+                  Node parent, 
+                  Node left, 
+                  Node right, 
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  int grid,
+                  int node_id):
+        self.left = left
+        self.right = right
+        self.parent = parent
+        cdef int i
+        self.left_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        self.right_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.grid = grid
+        self.node_id = node_id
+
+    def print_me(self):
+        print 'Node %i' % self.node_id
+        print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], 
+                                   self.left_edge[2])
+        print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], 
+                                   self.right_edge[2])
+        print '\t grid: %i' % self.grid
+
+
+def get_left_edge(Node node):
+    le = np.empty(3, dtype='float64')
+    for i in range(3):
+        le[i] = node.left_edge[i]
+    return le
+
+def get_right_edge(Node node):
+    re = np.empty(3, dtype='float64')
+    for i in range(3):
+        re[i] = node.right_edge[i]
+    return re
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _lchild_id(int node_id):
+    return (node_id<<1)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _rchild_id(int node_id):
+    return (node_id<<1) + 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def _parent_id(int node_id):
+    return (node_id-1) >> 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_build(Node node, int rank, int size):
+    return 1
+    # if (node.node_id < size) or (node.node_id >= 2*size):
+    #     return 1 
+    # elif node.node_id - size == rank:
+    #     return 1 
+    # else:
+    #     return 0 
+
+def kd_traverse(Node trunk, viewpoint=None):
+    if viewpoint is None:
+        for node in depth_traverse(trunk):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+    else:
+        for node in viewpoint_traverse(trunk, viewpoint):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_grid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grid(node, gle, gre, gid, rank, size)
+    else:
+        less_id = gle[node.split.dim] < node.split.pos
+        if less_id:
+            add_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+        greater_id = gre[node.split.dim] > node.split.pos
+        if greater_id:
+            add_grid(node.right, gle, gre,
+                     gid, rank, size)
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def insert_grid(Node node, 
+                      np.ndarray[np.float64_t, ndim=1] gle, 
+                      np.ndarray[np.float64_t, ndim=1] gre, 
+                      int grid_id, 
+                      int rank,
+                      int size):
+    if not should_i_build(node, rank, size):
+        return
+
+    # If we should continue to split based on parallelism, do so!
+    # if should_i_split(node, rank, size):
+    #     geo_split(node, gle, gre, grid_id, rank, size)
+    #     return
+    cdef int contained = 1
+    for i in range(3):
+        if gle[i] > node.left_edge[i] or\
+           gre[i] < node.right_edge[i]:
+            contained *= 0
+
+    if contained == 1:
+        node.grid = grid_id 
+        assert(node.grid != -1)
+        return
+
+    # Split the grid
+    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1 
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_grids(Node node, 
+                    int ngrids,
+                    np.ndarray[np.float64_t, ndim=2] gles, 
+                    np.ndarray[np.float64_t, ndim=2] gres, 
+                    np.ndarray[np.int64_t, ndim=1] gids, 
+                    int rank,
+                    int size):
+    cdef int i, nless, ngreater
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, ngrids, gles, gres, gids, rank, size)
+        return
+
+    less_ids = gles[:,node.split.dim] < node.split.pos
+    greater_ids = gres[:,node.split.dim] > node.split.pos
+    nless = 0
+    ngreater = 0
+    for i in xrange(ngrids):
+        nless += less_ids[i]
+        ngreater += greater_ids[i]
+        
+    if nless > 0:
+        add_grids(node.left, nless, gles[less_ids], gres[less_ids],
+                  gids[less_ids], rank, size)
+
+    if ngreater > 0:
+        add_grids(node.right, ngreater, gles[greater_ids], gres[greater_ids],
+                  gids[greater_ids], rank, size)
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_split(Node node, int rank, int size):
+    if node.node_id < size:
+        return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void insert_grids(Node node, 
+                       int ngrids,
+                       np.ndarray[np.float64_t, ndim=2] gles, 
+                       np.ndarray[np.float64_t, ndim=2] gres, 
+                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       int rank,
+                       int size):
+    
+    if not should_i_build(node, rank, size) or ngrids == 0:
+        return
+    cdef int contained = 1
+    cdef int check
+
+    if ngrids == 1:
+        # If we should continue to split based on parallelism, do so!
+        #if should_i_split(node, rank, size):
+        #    geo_split(node, gles, gres, grid_ids, rank, size)
+        #    return
+
+        for i in range(3):
+            contained *= gles[0, i] <= node.left_edge[i]
+            contained *= gres[0, i] >= node.right_edge[i]
+    
+        if contained == 1:
+            # print 'Node fully contained, setting to grid: %i' % gids[0]
+            node.grid = gids[0]
+            assert(node.grid != -1)
+            return
+
+    # Split the grids
+    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def split_grid(Node node, 
+               np.ndarray[np.float64_t, ndim=1] gle, 
+               np.ndarray[np.float64_t, ndim=1] gre, 
+               int gid, 
+               int rank,
+               int size):
+    # Find a Split
+    data = np.empty((1, 2, 3), dtype='float64')
+    for i in range(3):
+        data[0, 0, i] = gle[i]
+        data[0, 1, i] = gre[i]
+        # print 'Single Data: ', gle[i], gre[i]
+
+    le = np.empty(3)
+    re = np.empty(3)
+    for i in range(3):
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+
+    best_dim, split_pos, less_id, greater_id = \
+        kdtree_get_choices(1, data, le, re)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if less_id == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if greater_id == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids, 
+                        np.ndarray[np.float64_t, ndim=3] data,
+                        np.ndarray[np.float64_t, ndim=1] l_corner,
+                        np.ndarray[np.float64_t, ndim=1] r_corner):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i, j, dim] and
+                        data[i, j, dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i, j, dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i, j, dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
+    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    for i in range(n_grids):
+        if data[i, 0, best_dim] < split:
+            less_ids[i] = 1
+        else:
+            less_ids[i] = 0
+        if data[i, 1, best_dim] > split:
+            greater_ids[i] = 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, less_ids.view('bool'), greater_ids.view('bool')
+
+#@cython.boundscheck(True)
+#@cython.wraparound(False)
+#@cython.cdivision(True)
+cdef int split_grids(Node node, 
+                       int ngrids,
+                       np.ndarray[np.float64_t, ndim=2] gles, 
+                       np.ndarray[np.float64_t, ndim=2] gres, 
+                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       int rank,
+                       int size):
+    # Find a Split
+    cdef int i, j, k
+
+    le = get_left_edge(node)
+    re = get_right_edge(node)
+
+    data = np.array([(gles[i,:], gres[i,:]) for i in
+        xrange(ngrids)], copy=False)
+    best_dim, split_pos, less_ids, greater_ids = \
+        kdtree_get_choices(ngrids, data, le, re)
+ 
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grids.'
+        return -1
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    nless = np.sum(less_ids)
+    ngreat = np.sum(greater_ids)
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grids(node.left, nless, gles[less_ids], gres[less_ids],
+                 gids[less_ids], rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grids(node.right, ngreat, gles[greater_ids], gres[greater_ids],
+                 gids[greater_ids], rank, size)
+
+    return 0
+
+# def geo_split_grid(node, gle, gre, grid_id, rank, size):
+#     big_dim = np.argmax(gre-gle)
+#     new_pos = (gre[big_dim] + gle[big_dim])/2.
+#     old_gre = gre.copy()
+#     new_gle = gle.copy()
+#     new_gle[big_dim] = new_pos
+#     gre[big_dim] = new_pos
+# 
+#     split = Split(big_dim, new_pos)
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     insert_grid(node.left, gle, gre,
+#                 grid_id, rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     insert_grid(node.right, new_gle, old_gre,
+#                 grid_id, rank, size)
+#     return
+# 
+# 
+# def geo_split(node, gles, gres, grid_ids, rank, size):
+#     big_dim = np.argmax(gres[0]-gles[0])
+#     new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
+#     old_gre = gres[0].copy()
+#     new_gle = gles[0].copy()
+#     new_gle[big_dim] = new_pos
+#     gres[0][big_dim] = new_pos
+#     gles = np.append(gles, np.array([new_gle]), axis=0)
+#     gres = np.append(gres, np.array([old_gre]), axis=0)
+#     grid_ids = np.append(grid_ids, grid_ids, axis=0)
+# 
+#     split = Split(big_dim, new_pos)
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     insert_grids(node.left, gles[:1], gres[:1],
+#             grid_ids[:1], rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     insert_grids(node.right, gles[1:], gres[1:],
+#             grid_ids[1:], rank, size)
+#     return
+
+cdef new_right(Node node, Split * split):
+    new_right = Node.right_edge.copy()
+    new_right[split.dim] = split.pos
+    return new_right
+
+cdef new_left(Node node, Split * split):
+    new_left = Node.left_edge.copy()
+    new_left[split.dim] = split.pos
+    return new_left
+
+cdef void divide(Node node, Split * split):
+    # Create a Split
+    node.split = split
+    
+    lle = np.zeros(3, dtype='float64')
+    lre = np.zeros(3, dtype='float64')
+    rle = np.zeros(3, dtype='float64')
+    rre = np.zeros(3, dtype='float64')
+
+    cdef int i
+    for i in range(3):
+        lle[i] = node.left_edge[i]
+        lre[i] = node.right_edge[i]
+        rle[i] = node.left_edge[i]
+        rre[i] = node.right_edge[i]
+    lre[split.dim] = split.pos
+    rle[split.dim] = split.pos
+
+    node.left = Node(node, None, None,
+            lle.copy(), lre.copy(), node.grid,
+                     _lchild_id(node.node_id))
+
+    node.right = Node(node, None, None,
+            rle.copy(), rre.copy(), node.grid,
+                      _rchild_id(node.node_id))
+    
+    return
+# 
+def kd_sum_volume(Node node):
+    cdef np.float64_t vol = 1.0
+    if (node.left is None) and (node.right is None):
+        if node.grid == -1:
+            return 0.0
+        for i in range(3):
+            vol *= node.right_edge[i] - node.left_edge[i]
+        return vol 
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+# 
+# def kd_sum_cells(node):
+#     if (node.left is None) and (node.right is None):
+#         if node.grid is None:
+#             return 0.0
+#         return np.prod(node.right_edge - node.left_edge)
+#     else:
+#         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+# 
+# 
+
+def kd_node_check(Node node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid != -1:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+
+def kd_is_leaf(Node node):
+    cdef int has_l_child = node.left == None
+    cdef int has_r_child = node.right == None
+    assert has_l_child == has_r_child
+    return has_l_child
+
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+ 
+def depth_traverse(Node trunk, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+# 
+# def depth_first_touch(tree, max_node=None):
+#     '''
+#     Yields a depth-first traversal of the kd tree always going to
+#     the left child before the right.
+#     '''
+#     current = tree.trunk
+#     previous = None
+#     if max_node is None:
+#         max_node = np.inf
+#     while current is not None:
+#         if previous is None or previous.parent != current:
+#             yield current
+#         current, previous = step_depth(current, previous)
+#         if current is None: break
+#         if current.id >= max_node:
+#             current = current.parent
+#             previous = current.right
+# 
+# def breadth_traverse(tree):
+#     '''
+#     Yields a breadth-first traversal of the kd tree always going to
+#     the left child before the right.
+#     '''
+#     current = tree.trunk
+#     previous = None
+#     while current is not None:
+#         yield current
+#         current, previous = step_depth(current, previous)
+# 
+# 
+def viewpoint_traverse(tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(Node current, 
+                   Node previous, 
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+# 
+# 
+# def receive_and_reduce(comm, incoming_rank, image, add_to_front):
+#     mylog.debug( 'Receiving image from %04i' % incoming_rank)
+#     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
+#     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
+#         (image.shape[0], image.shape[1], image.shape[2]))
+# 
+#     if add_to_front:
+#         front = arr2
+#         back = image
+#     else:
+#         front = image
+#         back = arr2
+# 
+#     if image.shape[2] == 3:
+#         # Assume Projection Camera, Add
+#         np.add(image, front, image)
+#         return image
+# 
+#     ta = 1.0 - front[:,:,3]
+#     np.maximum(ta, 0.0, ta)
+#     # This now does the following calculation, but in a memory
+#     # conservative fashion
+#     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
+#     image = back.copy()
+#     for i in range(4):
+#         np.multiply(image[:,:,i], ta, image[:,:,i])
+#     np.add(image, front, image)
+#     return image
+# 
+# def send_to_parent(comm, outgoing_rank, image):
+#     mylog.debug( 'Sending image to %04i' % outgoing_rank)
+#     comm.send_array(image, outgoing_rank, tag=comm.rank)
+# 
+# def scatter_image(comm, root, image):
+#     mylog.debug( 'Scattering from %04i' % root)
+#     image = comm.mpi_bcast(image, root=root)
+#     return image
+# 
+# def find_node(node, pos):
+#     """
+#     Find the AMRKDTree node enclosing a position
+#     """
+#     assert(np.all(node.left_edge <= pos))
+#     assert(np.all(node.right_edge > pos))
+#     while not kd_is_leaf(node):
+#         if pos[node.split.dim] < node.split.pos:
+#             node = node.left
+#         else:
+#             node = node.right
+#     return node
+
+
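A note on the traversal code in this new file: step_depth implements a stackless depth-first walk by tracking only (current, previous) and using parent pointers to decide whether it is descending or returning from a child. Interior nodes are therefore yielded several times, which is why kd_traverse filters down to leaves. A stripped-down pure-Python sketch of the same idea on a toy tree (it assumes every internal node has both children, unlike the real code):

    class TNode(object):
        def __init__(self, name, left=None, right=None):
            self.name, self.left, self.right = name, left, right
            self.parent = None
            for child in (left, right):
                if child is not None:
                    child.parent = self

    def is_leaf(node):
        return node.left is None and node.right is None

    def depth_traverse(root):
        current, previous = root, None
        while current is not None:
            yield current
            if is_leaf(current):               # at a leaf, climb back up
                current, previous = current.parent, current
            elif current.parent is previous:   # descending: left child first
                current, previous = current.left, current
            elif current.left is previous:     # finished left subtree: go right
                current, previous = current.right, current
            else:                              # finished right subtree: climb
                current, previous = current.parent, current

    tree = TNode("root", TNode("L", TNode("LL"), TNode("LR")), TNode("R"))
    print([n.name for n in depth_traverse(tree)])
    # -> ['root', 'L', 'LL', 'L', 'LR', 'L', 'root', 'R', 'root']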

diff -r e155fde14e4e4735782c5a5afffe52b65c9ba2b1 -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,10 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_extension("amr_kdtools", 
+                         ["yt/utilities/lib/amr_kdtools.pyx"],
+                         extra_compile_args=['-O3'],
+                         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]
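For anyone compiling a similar .pyx module outside yt's numpy.distutils tree, a roughly equivalent standalone setup.py might look like the sketch below (hypothetical; the diff above is how yt itself builds the extension):

    # Minimal standalone build for a Cython module that cimports numpy.
    from distutils.core import setup
    from Cython.Build import cythonize
    import numpy

    setup(
        ext_modules=cythonize("amr_kdtools.pyx"),
        include_dirs=[numpy.get_include()],    # needed for 'cimport numpy'
    )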


https://bitbucket.org/yt_analysis/yt-3.0/commits/aa34c033a081/
Changeset:   aa34c033a081
Branch:      yt
User:        samskillman
Date:        2013-06-05 19:29:22
Summary:     Okay, traversal and rendering now work. 2x speedup for now. Need to move everything
away from ndarrays next.
Affected #:  2 files

diff -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f -r aa34c033a0816641aa0249fc52e7c5df781004cd yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,7 @@
 from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
         depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_grids, add_grid, \
+from yt.utilities.lib.amr_kdtools import Node, add_grids, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -210,7 +210,7 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        for node in kd_traverse(self.tree.trunk):
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
             yield self.get_brick_data(node)
 
     def get_node(self, nodeid):

diff -r 4609f5c48f8ad7b3efd0fbcf60eba4890f3d7a5f -r aa34c033a0816641aa0249fc52e7c5df781004cd yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -118,18 +118,67 @@
                 yield node
 
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def add_grid(Node node, 
-                   np.ndarray[np.float64_t, ndim=1] gle, 
-                   np.ndarray[np.float64_t, ndim=1] gre, 
-                   int gid, 
-                   int rank,
-                   int size):
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def add_grid(Node node, 
+#                    np.ndarray[np.float64_t, ndim=1] gle, 
+#                    np.ndarray[np.float64_t, ndim=1] gre, 
+#                    int gid, 
+#                    int rank,
+#                    int size):
+# 
+#     if not should_i_build(node, rank, size):
+#         return
+# 
+#     if kd_is_leaf(node):
+#         insert_grid(node, gle, gre, gid, rank, size)
+#     else:
+#         less_id = gle[node.split.dim] < node.split.pos
+#         if less_id:
+#             add_grid(node.left, gle, gre,
+#                      gid, rank, size)
+# 
+#         greater_id = gre[node.split.dim] > node.split.pos
+#         if greater_id:
+#             add_grid(node.right, gle, gre,
+#                      gid, rank, size)
+#     return
 
-    if not should_i_build(node, rank, size):
-        return
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def insert_grid(Node node, 
+#                       np.ndarray[np.float64_t, ndim=1] gle, 
+#                       np.ndarray[np.float64_t, ndim=1] gre, 
+#                       int grid_id, 
+#                       int rank,
+#                       int size):
+#     if not should_i_build(node, rank, size):
+#         return
+# 
+#     # If we should continue to split based on parallelism, do so!
+#     # if should_i_split(node, rank, size):
+#     #     geo_split(node, gle, gre, grid_id, rank, size)
+#     #     return
+#     cdef int contained = 1
+#     for i in range(3):
+#         if gle[i] > node.left_edge[i] or\
+#            gre[i] < node.right_edge[i]:
+#             contained *= 0
+# 
+#     if contained == 1:
+#         node.grid = grid_id 
+#         assert(node.grid != -1)
+#         return
+# 
+#     # Split the grid
+#     cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+#     # If check is -1, then we have found a place where there are no choices.
+#     # Exit out and set the node to None.
+#     if check == -1:
+#         node.grid = -1 
+#     return
 
     if kd_is_leaf(node):
         insert_grid(node, gle, gre, gid, rank, size)
@@ -190,7 +239,8 @@
                     np.ndarray[np.int64_t, ndim=1] gids, 
                     int rank,
                     int size):
-    cdef int i, nless, ngreater
+    cdef int i, j, nless, ngreater
+    cdef np.int64_t gid
     if not should_i_build(node, rank, size):
         return
 
@@ -198,21 +248,53 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids = gles[:,node.split.dim] < node.split.pos
-    greater_ids = gres[:,node.split.dim] > node.split.pos
+    cdef np.ndarray less_ids = np.zeros(ngrids, dtype='int64')
+    cdef np.ndarray greater_ids = np.zeros(ngrids, dtype='int64')
+   
     nless = 0
     ngreater = 0
-    for i in xrange(ngrids):
-        nless += less_ids[i]
-        ngreater += greater_ids[i]
-        
+    for i in range(ngrids):
+        if gles[i, node.split.dim] < node.split.pos:
+            less_ids[nless] = i
+            nless += 1
+            
+        if gres[i, node.split.dim] > node.split.pos:
+            greater_ids[ngreater] = i
+            ngreater += 1
+
+    #print 'nless: %i' % nless
+    #print 'ngreater: %i' % ngreater
+
+    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+
+    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+
+    cdef int index
+    for i in range(nless):
+        index = less_ids[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i, j] = gles[index, j]
+            less_gres[i, j] = gres[index, j]
+
     if nless > 0:
-        add_grids(node.left, nless, gles[less_ids], gres[less_ids],
-                  gids[less_ids], rank, size)
+        add_grids(node.left, nless, less_gles, less_gres,
+                  l_ids, rank, size)
+            
+    for i in range(ngreater):
+        index = greater_ids[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i, j] = gles[index, j]
+            greater_gres[i, j] = gres[index, j]
 
     if ngreater > 0:
-        add_grids(node.right, ngreater, gles[greater_ids], gres[greater_ids],
-                  gids[greater_ids], rank, size)
+        add_grids(node.right, ngreater, greater_gles, greater_gres,
+                  g_ids, rank, size)
     return
 
 @cython.boundscheck(False)
@@ -263,68 +345,75 @@
         node.grid = -1
     return
 
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-def split_grid(Node node, 
-               np.ndarray[np.float64_t, ndim=1] gle, 
-               np.ndarray[np.float64_t, ndim=1] gre, 
-               int gid, 
-               int rank,
-               int size):
-    # Find a Split
-    data = np.empty((1, 2, 3), dtype='float64')
-    for i in range(3):
-        data[0, 0, i] = gle[i]
-        data[0, 1, i] = gre[i]
-        # print 'Single Data: ', gle[i], gre[i]
-
-    le = np.empty(3)
-    re = np.empty(3)
-    for i in range(3):
-        le[i] = node.left_edge[i]
-        re[i] = node.right_edge[i]
-
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(1, data, le, re)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        print 'Failed to split grid.'
-        return -1
-
-        
-    split = <Split *> malloc(sizeof(Split))
-    split.dim = best_dim
-    split.pos = split_pos
-
-    #del data
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id == 1:
-        insert_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id == 1:
-        insert_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-    return 0
+# @cython.boundscheck(False)
+# @cython.wraparound(False)
+# @cython.cdivision(True)
+# def split_grid(Node node, 
+#                np.ndarray[np.float64_t, ndim=1] gle, 
+#                np.ndarray[np.float64_t, ndim=1] gre, 
+#                int gid, 
+#                int rank,
+#                int size):
+#     # Find a Split
+#     data = np.empty((1, 2, 3), dtype='float64')
+#     for i in range(3):
+#         data[0, 0, i] = gle[i]
+#         data[0, 1, i] = gre[i]
+#         # print 'Single Data: ', gle[i], gre[i]
+# 
+#     le = np.empty(3)
+#     re = np.empty(3)
+#     for i in range(3):
+#         le[i] = node.left_edge[i]
+#         re[i] = node.right_edge[i]
+# 
+#     best_dim, split_pos, less_id, greater_id = \
+#         kdtree_get_choices(1, data, le, re)
+# 
+#     # If best_dim is -1, then we have found a place where there are no choices.
+#     # Exit out and set the node to None.
+#     if best_dim == -1:
+#         print 'Failed to split grid.'
+#         return -1
+# 
+#         
+#     split = <Split *> malloc(sizeof(Split))
+#     split.dim = best_dim
+#     split.pos = split_pos
+# 
+#     #del data
+# 
+#     # Create a Split
+#     divide(node, split)
+# 
+#     # Populate Left Node
+#     #print 'Inserting left node', node.left_edge, node.right_edge
+#     if less_id == 1:
+#         insert_grid(node.left, gle, gre,
+#                      gid, rank, size)
+# 
+#     # Populate Right Node
+#     #print 'Inserting right node', node.left_edge, node.right_edge
+#     if greater_id == 1:
+#         insert_grid(node.right, gle, gre,
+#                      gid, rank, size)
+# 
+#     return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef kdtree_get_choices(int n_grids, 
-                        np.ndarray[np.float64_t, ndim=3] data,
-                        np.ndarray[np.float64_t, ndim=1] l_corner,
-                        np.ndarray[np.float64_t, ndim=1] r_corner):
+                        np.float64_t ***data,
+                        np.float64_t *l_corner,
+                        np.float64_t *r_corner,
+                        np.uint8_t *less_ids,
+                        np.uint8_t *greater_ids,
+                       ):
+# cdef kdtree_get_choices(int n_grids, 
+#                         np.ndarray[np.float64_t, ndim=3] data,
+#                         np.ndarray[np.float64_t, ndim=1] l_corner,
+#                         np.ndarray[np.float64_t, ndim=1] r_corner):
     cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
     cdef np.float64_t **uniquedims, *uniques, split
     uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
@@ -341,19 +430,19 @@
             # Check for disqualification
             for j in range(2):
                 # print "Checking against", i,j,dim,data[i,j,dim]
-                if not (l_corner[dim] < data[i, j, dim] and
-                        data[i, j, dim] < r_corner[dim]):
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
                     # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
                     continue
                 skipit = 0
                 # Add our left ...
                 for k in range(n_unique):
-                    if uniques[k] == data[i, j, dim]:
+                    if uniques[k] == data[i][j][dim]:
                         skipit = 1
                         # print "Identified", uniques[k], data[i,j,dim], n_unique
                         break
                 if skipit == 0:
-                    uniques[n_unique] = data[i, j, dim]
+                    uniques[n_unique] = data[i][j][dim]
                     n_unique += 1
         if n_unique > my_max:
             best_dim = dim
@@ -366,19 +455,20 @@
         tarr[i] = uniquedims[best_dim][i]
     tarr.sort()
     split = tarr[my_split]
-    cdef np.ndarray[np.uint8_t, ndim=1] less_ids = np.empty(n_grids, dtype='uint8')
-    cdef np.ndarray[np.uint8_t, ndim=1] greater_ids = np.empty(n_grids, dtype='uint8')
+    cdef int nless=0, ngreater=0
     for i in range(n_grids):
-        if data[i, 0, best_dim] < split:
+        if data[i][0][best_dim] < split:
             less_ids[i] = 1
+            nless += 1
         else:
             less_ids[i] = 0
-        if data[i, 1, best_dim] > split:
+        if data[i][1][best_dim] > split:
             greater_ids[i] = 1
+            ngreater += 1
         else:
             greater_ids[i] = 0
     # Return out unique values
-    return best_dim, split, less_ids.view('bool'), greater_ids.view('bool')
+    return best_dim, split, nless, ngreater
 
 #@cython.boundscheck(True)
 #@cython.wraparound(False)
@@ -396,10 +486,21 @@
     le = get_left_edge(node)
     re = get_right_edge(node)
 
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(ngrids)], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(ngrids, data, le, re)
+    data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
+    for i in range(ngrids):
+        data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+        for j in range(2):
+            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        for j in range(3):
+            data[i][0][j] = gles[i, j]
+            data[i][1][j] = gres[i, j]
+
+    less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
  
     # If best_dim is -1, then we have found a place where there are no choices.
     # Exit out and set the node to None.
@@ -416,17 +517,54 @@
     # Create a Split
     divide(node, split)
 
-    nless = np.sum(less_ids)
-    ngreat = np.sum(greater_ids)
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, nless, gles[less_ids], gres[less_ids],
-                 gids[less_ids], rank, size)
+    cdef np.ndarray less_index = np.zeros(ngrids, dtype='int64')
+    cdef np.ndarray greater_index = np.zeros(ngrids, dtype='int64')
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if less_ids[i] == 1:
+            less_index[nless] = i
+            nless += 1
 
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, ngreat, gles[greater_ids], gres[greater_ids],
-                 gids[greater_ids], rank, size)
+        if greater_ids[i] == 1:
+            greater_index[ngreater] = i
+            ngreater += 1
+
+    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
+    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+
+    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
+    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+
+    cdef int index
+    for i in range(nless):
+        index = less_index[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i, j] = gles[index, j]
+            less_gres[i, j] = gres[index, j]
+
+    if nless > 0:
+        # Populate Left Node
+        #print 'Inserting left node', node.left_edge, node.right_edge
+        insert_grids(node.left, nless, less_gles, less_gres,
+                     l_ids, rank, size)
+
+    for i in range(ngreater):
+        index = greater_index[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i, j] = gles[index, j]
+            greater_gres[i, j] = gres[index, j]
+
+    if ngreater > 0:
+        # Populate Right Node
+        #print 'Inserting right node', node.left_edge, node.right_edge
+        insert_grids(node.right, ngreater, greater_gles, greater_gres,
+                     g_ids, rank, size)
 
     return 0
 
@@ -640,7 +778,7 @@
     with nodes furthest away from viewpoint.
     '''
 
-    current = tree.trunk
+    current = tree
     previous = None
     while current is not None:
         yield current
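
The changeset above also reworks kdtree_get_choices to fill
caller-allocated uint8 masks and return the less/greater counts, after
which split_grids compacts the matching grids into fresh arrays. A
minimal pure-Python sketch of that partitioning step (the names here
are illustrative, not the compiled API):

    import numpy as np

    def partition_grids(gles, gres, best_dim, split_pos):
        # A grid goes left if its left edge lies below the split plane
        # and right if its right edge lies above it; a grid straddling
        # the plane lands on both sides, exactly as in split_grids.
        less_ids = gles[:, best_dim] < split_pos
        greater_ids = gres[:, best_dim] > split_pos
        return (gles[less_ids], gres[less_ids],
                gles[greater_ids], gres[greater_ids],
                int(less_ids.sum()), int(greater_ids.sum()))

    # Example: split at x = 0.5; the second grid straddles the plane.
    gles = np.array([[0.0, 0.0, 0.0], [0.4, 0.0, 0.0]])
    gres = np.array([[0.5, 1.0, 1.0], [1.0, 1.0, 1.0]])
    print(partition_grids(gles, gres, 0, 0.5)[4:])  # (2, 1)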


https://bitbucket.org/yt_analysis/yt-3.0/commits/699c480bfb50/
Changeset:   699c480bfb50
Branch:      yt
User:        samskillman
Date:        2013-06-05 21:24:20
Summary:     Move from ndarrays to np.float64_t pointers. Now up to 10x faster than the original.
Affected #:  3 files

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,7 @@
 from amr_kdtools import \
         receive_and_reduce, send_to_parent, scatter_image, find_node, \
         depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_grids, \
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -92,7 +92,7 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gids.size, gles, gres, gids, 
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
                               self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
@@ -108,7 +108,7 @@
             gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
             gids = np.array([g.id for g in grids if g.Level == lvl])
 
-            add_grids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
             del gles, gres, gids
 
 

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -26,13 +26,13 @@
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef public Node left
-    cdef public Node right
-    cdef public Node parent
-    cdef public int grid
-    cdef public int node_id
-    cdef np.float64_t * left_edge
-    cdef np.float64_t * right_edge
+    cdef readonly Node left
+    cdef readonly Node right
+    cdef readonly Node parent
+    cdef readonly int grid
+    cdef readonly int node_id
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
     cdef public data
     cdef Split * split
 
@@ -48,8 +48,6 @@
         self.right = right
         self.parent = parent
         cdef int i
-        self.left_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
-        self.right_edge = <np.float64_t *> malloc(sizeof(np.float64_t) * 3)
         for i in range(3):
             self.left_edge[i] = left_edge[i]
             self.right_edge[i] = right_edge[i]
@@ -99,13 +97,12 @@
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_build(Node node, int rank, int size):
-    return 1
-    # if (node.node_id < size) or (node.node_id >= 2*size):
-    #     return 1 
-    # elif node.node_id - size == rank:
-    #     return 1 
-    # else:
-    #     return 0 
+    if (node.node_id < size) or (node.node_id >= 2*size):
+        return 1 
+    elif node.node_id - size == rank:
+        return 1 
+    else:
+        return 0 
 
 def kd_traverse(Node trunk, viewpoint=None):
     if viewpoint is None:
@@ -239,6 +236,35 @@
                     np.ndarray[np.int64_t, ndim=1] gids, 
                     int rank,
                     int size):
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    for i in range(ngrids):
+        pgles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgids[i] = gids[i]
+        for j in range(3):
+            pgles[i][j] = gles[i, j]
+            pgres[i][j] = gres[i, j]
+
+    add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
+
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grids(Node node, 
+                    int ngrids,
+                    np.float64_t **gles, 
+                    np.float64_t **gres, 
+                    np.int64_t *gids, 
+                    int rank,
+                    int size):
     cdef int i, j, nless, ngreater
     cdef np.int64_t gid
     if not should_i_build(node, rank, size):
@@ -248,38 +274,44 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    cdef np.ndarray less_ids = np.zeros(ngrids, dtype='int64')
-    cdef np.ndarray greater_ids = np.zeros(ngrids, dtype='int64')
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
     for i in range(ngrids):
-        if gles[i, node.split.dim] < node.split.pos:
+        if gles[i][node.split.dim] < node.split.pos:
             less_ids[nless] = i
             nless += 1
             
-        if gres[i, node.split.dim] > node.split.pos:
+        if gres[i][node.split.dim] > node.split.pos:
             greater_ids[ngreater] = i
             ngreater += 1
 
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
         index = less_ids[i]
         l_ids[i] = gids[index]
         for j in range(3):
-            less_gles[i, j] = gles[index, j]
-            less_gres[i, j] = gres[index, j]
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
 
     if nless > 0:
         add_grids(node.left, nless, less_gles, less_gres,
@@ -289,8 +321,8 @@
         index = greater_ids[i]
         g_ids[i] = gids[index]
         for j in range(3):
-            greater_gles[i, j] = gles[index, j]
-            greater_gres[i, j] = gres[index, j]
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
 
     if ngreater > 0:
         add_grids(node.right, ngreater, greater_gles, greater_gres,
@@ -310,9 +342,9 @@
 @cython.cdivision(True)
 cdef void insert_grids(Node node, 
                        int ngrids,
-                       np.ndarray[np.float64_t, ndim=2] gles, 
-                       np.ndarray[np.float64_t, ndim=2] gres, 
-                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
                        int rank,
                        int size):
     
@@ -328,8 +360,8 @@
         #    return
 
         for i in range(3):
-            contained *= gles[0, i] <= node.left_edge[i]
-            contained *= gres[0, i] >= node.right_edge[i]
+            contained *= gles[0][i] <= node.left_edge[i]
+            contained *= gres[0][i] >= node.right_edge[i]
     
         if contained == 1:
             # print 'Node fully contained, setting to grid: %i' % gids[0]
@@ -470,30 +502,27 @@
     # Return out unique values
     return best_dim, split, nless, ngreater
 
-#@cython.boundscheck(True)
+#@cython.boundscheck(False)
 #@cython.wraparound(False)
 #@cython.cdivision(True)
 cdef int split_grids(Node node, 
                        int ngrids,
-                       np.ndarray[np.float64_t, ndim=2] gles, 
-                       np.ndarray[np.float64_t, ndim=2] gres, 
-                       np.ndarray[np.int64_t, ndim=1] gids, 
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
                        int rank,
                        int size):
     # Find a Split
     cdef int i, j, k
 
-    le = get_left_edge(node)
-    re = get_right_edge(node)
-
     data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
     for i in range(ngrids):
         data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
         for j in range(2):
             data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         for j in range(3):
-            data[i][0][j] = gles[i, j]
-            data[i][1][j] = gres[i, j]
+            data[i][0][j] = gles[i][j]
+            data[i][1][j] = gres[i][j]
 
     less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
     greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
@@ -517,8 +546,8 @@
     # Create a Split
     divide(node, split)
 
-    cdef np.ndarray less_index = np.zeros(ngrids, dtype='int64')
-    cdef np.ndarray greater_index = np.zeros(ngrids, dtype='int64')
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -531,21 +560,27 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    cdef np.ndarray less_gles = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray less_gres = np.zeros([nless, 3], dtype='float64')
-    cdef np.ndarray l_ids = np.zeros(nless, dtype='int64')
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    cdef np.ndarray greater_gles = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray greater_gres = np.zeros([ngreater, 3], dtype='float64')
-    cdef np.ndarray g_ids = np.zeros(ngreater, dtype='int64')
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
         index = less_index[i]
         l_ids[i] = gids[index]
         for j in range(3):
-            less_gles[i, j] = gles[index, j]
-            less_gres[i, j] = gres[index, j]
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
 
     if nless > 0:
         # Populate Left Node
@@ -557,8 +592,8 @@
         index = greater_index[i]
         g_ids[i] = gids[index]
         for j in range(3):
-            greater_gles[i, j] = gles[index, j]
-            greater_gres[i, j] = gres[index, j]
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
 
     if ngreater > 0:
         # Populate Right Node
@@ -634,28 +669,25 @@
     # Create a Split
     node.split = split
     
-    lle = np.zeros(3, dtype='float64')
-    lre = np.zeros(3, dtype='float64')
-    rle = np.zeros(3, dtype='float64')
-    rre = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] le = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] re = np.zeros(3, dtype='float64')
 
     cdef int i
     for i in range(3):
-        lle[i] = node.left_edge[i]
-        lre[i] = node.right_edge[i]
-        rle[i] = node.left_edge[i]
-        rre[i] = node.right_edge[i]
-    lre[split.dim] = split.pos
-    rle[split.dim] = split.pos
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+    re[split.dim] = split.pos
 
     node.left = Node(node, None, None,
-            lle.copy(), lre.copy(), node.grid,
+                     le, re, node.grid,
                      _lchild_id(node.node_id))
 
+    re[split.dim] = node.right_edge[split.dim]
+    le[split.dim] = split.pos
     node.right = Node(node, None, None,
-            rle.copy(), rre.copy(), node.grid,
+                      le, re, node.grid,
                       _rchild_id(node.node_id))
-    
+
     return
 # 
 def kd_sum_volume(Node node):
@@ -668,16 +700,6 @@
         return vol 
     else:
         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-# 
-# def kd_sum_cells(node):
-#     if (node.left is None) and (node.right is None):
-#         if node.grid is None:
-#             return 0.0
-#         return np.prod(node.right_edge - node.left_edge)
-#     else:
-#         return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-# 
-# 
 
 def kd_node_check(Node node):
     assert (node.left is None) == (node.right is None)
@@ -688,7 +710,6 @@
     else:
         return kd_node_check(node.left)+kd_node_check(node.right)
 
-
 def kd_is_leaf(Node node):
     cdef int has_l_child = node.left == None
     cdef int has_r_child = node.right == None
@@ -741,37 +762,37 @@
         if current.node_id >= max_node:
             current = current.parent
             previous = current.right
-# 
-# def depth_first_touch(tree, max_node=None):
-#     '''
-#     Yields a depth-first traversal of the kd tree always going to
-#     the left child before the right.
-#     '''
-#     current = tree.trunk
-#     previous = None
-#     if max_node is None:
-#         max_node = np.inf
-#     while current is not None:
-#         if previous is None or previous.parent != current:
-#             yield current
-#         current, previous = step_depth(current, previous)
-#         if current is None: break
-#         if current.id >= max_node:
-#             current = current.parent
-#             previous = current.right
-# 
-# def breadth_traverse(tree):
-#     '''
-#     Yields a breadth-first traversal of the kd tree always going to
-#     the left child before the right.
-#     '''
-#     current = tree.trunk
-#     previous = None
-#     while current is not None:
-#         yield current
-#         current, previous = step_depth(current, previous)
-# 
-# 
+
+def depth_first_touch(tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(tree):
+    '''
+    Yields a breadth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree.trunk
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
 def viewpoint_traverse(tree, viewpoint):
     '''
     Yields a viewpoint dependent traversal of the kd-tree.  Starts
@@ -832,57 +853,28 @@
             current = current.parent
 
     return current, previous
-# 
-# 
-# def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-#     mylog.debug( 'Receiving image from %04i' % incoming_rank)
-#     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
-#     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
-#         (image.shape[0], image.shape[1], image.shape[2]))
-# 
-#     if add_to_front:
-#         front = arr2
-#         back = image
-#     else:
-#         front = image
-#         back = arr2
-# 
-#     if image.shape[2] == 3:
-#         # Assume Projection Camera, Add
-#         np.add(image, front, image)
-#         return image
-# 
-#     ta = 1.0 - front[:,:,3]
-#     np.maximum(ta, 0.0, ta)
-#     # This now does the following calculation, but in a memory
-#     # conservative fashion
-#     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
-#     image = back.copy()
-#     for i in range(4):
-#         np.multiply(image[:,:,i], ta, image[:,:,i])
-#     np.add(image, front, image)
-#     return image
-# 
-# def send_to_parent(comm, outgoing_rank, image):
-#     mylog.debug( 'Sending image to %04i' % outgoing_rank)
-#     comm.send_array(image, outgoing_rank, tag=comm.rank)
-# 
-# def scatter_image(comm, root, image):
-#     mylog.debug( 'Scattering from %04i' % root)
-#     image = comm.mpi_bcast(image, root=root)
-#     return image
-# 
-# def find_node(node, pos):
-#     """
-#     Find the AMRKDTree node enclosing a position
-#     """
-#     assert(np.all(node.left_edge <= pos))
-#     assert(np.all(node.right_edge > pos))
-#     while not kd_is_leaf(node):
-#         if pos[node.split.dim] < node.split.pos:
-#             node = node.left
-#         else:
-#             node = node.right
-#     return node
 
+cdef int point_in_node(Node node, 
+                       np.ndarray[np.float64_t, ndim=1] point):
+    cdef int i
+    cdef int inside = 1
+    for i in range(3):
+        inside *= node.left_edge[i] <= point[i]
+        inside *= node.right_edge[i] > point[i]
+    return inside
 
+
+def find_node(Node node,
+              np.ndarray[np.float64_t, ndim=1] point):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(point_in_node(node, point))
+    while not kd_is_leaf(node):
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+
+
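
The new point_in_node/find_node pair above is a plain walk down the
split planes. A pure-Python equivalent, assuming Node-like objects
that expose left/right children, left_edge/right_edge arrays, and a
split with .dim and .pos (a sketch, not the compiled API):

    import numpy as np

    def find_node_py(node, point):
        # Descend to the leaf whose box contains `point`, picking a
        # child at each split plane, as the cdef find_node does.
        assert np.all(node.left_edge <= point)
        assert np.all(node.right_edge > point)
        while node.left is not None:  # interior nodes have children
            if point[node.split.dim] < node.split.pos:
                node = node.left
            else:
                node = node.right
        return node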

diff -r aa34c033a0816641aa0249fc52e7c5df781004cd -r 699c480bfb50a241d28c18693e65551820e1caae yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -251,7 +251,6 @@
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
-                         extra_compile_args=['-O3'],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
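
The pattern throughout this changeset is to pay the NumPy-to-C
conversion cost once, at the Python entry point, so the recursive
workers only ever touch raw float64/int64 buffers. A rough pure-Python
analogue of that boundary step (illustrative only; normalize_grid_arrays
is a hypothetical helper, not part of yt):

    import numpy as np

    def normalize_grid_arrays(gles, gres, gids):
        # One-time normalization at the Python/C boundary; the
        # C-level recursion can then assume contiguous, correctly
        # typed buffers and never touch Python objects.
        gles = np.ascontiguousarray(gles, dtype=np.float64)
        gres = np.ascontiguousarray(gres, dtype=np.float64)
        gids = np.ascontiguousarray(gids, dtype=np.int64)
        assert gles.shape == gres.shape == (gids.size, 3)
        return gles, gres, gids

Note that the malloc'd copies in the hunks above are not paired with
free() calls in the code shown, so the speedup comes at the cost of
memory that is not reclaimed while the tree is being built.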


https://bitbucket.org/yt_analysis/yt-3.0/commits/8587b273c9b8/
Changeset:   8587b273c9b8
Branch:      yt
User:        samskillman
Date:        2013-06-05 21:40:50
Summary:     Adding back in single-grid adds through an add_pygrid wrapper.
Affected #:  1 file

diff -r 699c480bfb50a241d28c18693e65551820e1caae -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -114,68 +114,18 @@
             if kd_is_leaf(node) and node.grid != -1:
                 yield node
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grid(Node node, 
+                   np.float64_t *gle, 
+                   np.float64_t *gre, 
+                   int gid, 
+                   int rank,
+                   int size):
 
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def add_grid(Node node, 
-#                    np.ndarray[np.float64_t, ndim=1] gle, 
-#                    np.ndarray[np.float64_t, ndim=1] gre, 
-#                    int gid, 
-#                    int rank,
-#                    int size):
-# 
-#     if not should_i_build(node, rank, size):
-#         return
-# 
-#     if kd_is_leaf(node):
-#         insert_grid(node, gle, gre, gid, rank, size)
-#     else:
-#         less_id = gle[node.split.dim] < node.split.pos
-#         if less_id:
-#             add_grid(node.left, gle, gre,
-#                      gid, rank, size)
-# 
-#         greater_id = gre[node.split.dim] > node.split.pos
-#         if greater_id:
-#             add_grid(node.right, gle, gre,
-#                      gid, rank, size)
-#     return
-
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def insert_grid(Node node, 
-#                       np.ndarray[np.float64_t, ndim=1] gle, 
-#                       np.ndarray[np.float64_t, ndim=1] gre, 
-#                       int grid_id, 
-#                       int rank,
-#                       int size):
-#     if not should_i_build(node, rank, size):
-#         return
-# 
-#     # If we should continue to split based on parallelism, do so!
-#     # if should_i_split(node, rank, size):
-#     #     geo_split(node, gle, gre, grid_id, rank, size)
-#     #     return
-#     cdef int contained = 1
-#     for i in range(3):
-#         if gle[i] > node.left_edge[i] or\
-#            gre[i] < node.right_edge[i]:
-#             contained *= 0
-# 
-#     if contained == 1:
-#         node.grid = grid_id 
-#         assert(node.grid != -1)
-#         return
-# 
-#     # Split the grid
-#     cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
-#     # If check is -1, then we have found a place where there are no choices.
-#     # Exit out and set the node to None.
-#     if check == -1:
-#         node.grid = -1 
-#     return
+    if not should_i_build(node, rank, size):
+        return
 
     if kd_is_leaf(node):
         insert_grid(node, gle, gre, gid, rank, size)
@@ -191,15 +141,35 @@
                      gid, rank, size)
     return
 
+def add_pygrid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    cdef int j
+    for j in range(3):
+        pgles[j] = gle[j]
+        pgres[j] = gre[j]
+
+    add_grid(node, pgles, pgres, gid, rank, size)
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def insert_grid(Node node, 
-                      np.ndarray[np.float64_t, ndim=1] gle, 
-                      np.ndarray[np.float64_t, ndim=1] gre, 
-                      int grid_id, 
-                      int rank,
-                      int size):
+cdef insert_grid(Node node, 
+                np.float64_t *gle, 
+                np.float64_t *gre, 
+                int grid_id, 
+                int rank,
+                int size):
     if not should_i_build(node, rank, size):
         return
 
@@ -229,7 +199,7 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def add_grids(Node node, 
+def add_pygrids(Node node, 
                     int ngrids,
                     np.ndarray[np.float64_t, ndim=2] gles, 
                     np.ndarray[np.float64_t, ndim=2] gres, 
@@ -377,60 +347,61 @@
         node.grid = -1
     return
 
-# @cython.boundscheck(False)
-# @cython.wraparound(False)
-# @cython.cdivision(True)
-# def split_grid(Node node, 
-#                np.ndarray[np.float64_t, ndim=1] gle, 
-#                np.ndarray[np.float64_t, ndim=1] gre, 
-#                int gid, 
-#                int rank,
-#                int size):
-#     # Find a Split
-#     data = np.empty((1, 2, 3), dtype='float64')
-#     for i in range(3):
-#         data[0, 0, i] = gle[i]
-#         data[0, 1, i] = gre[i]
-#         # print 'Single Data: ', gle[i], gre[i]
-# 
-#     le = np.empty(3)
-#     re = np.empty(3)
-#     for i in range(3):
-#         le[i] = node.left_edge[i]
-#         re[i] = node.right_edge[i]
-# 
-#     best_dim, split_pos, less_id, greater_id = \
-#         kdtree_get_choices(1, data, le, re)
-# 
-#     # If best_dim is -1, then we have found a place where there are no choices.
-#     # Exit out and set the node to None.
-#     if best_dim == -1:
-#         print 'Failed to split grid.'
-#         return -1
-# 
-#         
-#     split = <Split *> malloc(sizeof(Split))
-#     split.dim = best_dim
-#     split.pos = split_pos
-# 
-#     #del data
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     if less_id == 1:
-#         insert_grid(node.left, gle, gre,
-#                      gid, rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     if greater_id == 1:
-#         insert_grid(node.right, gle, gre,
-#                      gid, rank, size)
-# 
-#     return 0
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef split_grid(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int gid, 
+               int rank,
+               int size):
+
+    cdef int j
+    data = <np.float64_t ***> malloc(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+    for j in range(2):
+        data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    for j in range(3):
+        data[0][0][j] = gle[j]
+        data[0][1][j] = gre[j]
+
+    less_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if nless == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if ngreater == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -442,10 +413,6 @@
                         np.uint8_t *less_ids,
                         np.uint8_t *greater_ids,
                        ):
-# cdef kdtree_get_choices(int n_grids, 
-#                         np.ndarray[np.float64_t, ndim=3] data,
-#                         np.ndarray[np.float64_t, ndim=1] l_corner,
-#                         np.ndarray[np.float64_t, ndim=1] r_corner):
     cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
     cdef np.float64_t **uniquedims, *uniques, split
     uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
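
add_pygrid/add_grid above route a single grid down the tree: at each
interior node the grid descends into every child its extent overlaps,
so a grid straddling a split plane is inserted on both sides. A
pure-Python sketch of that routing, assuming Node-like objects and
ignoring the rank/size ownership test and the further splitting done
by insert_grid:

    def add_grid_py(node, gle, gre, gid):
        # Leaf: attach the grid here (insert_grid handles the real
        # containment check and any further splitting).
        if node.left is None and node.right is None:
            node.grid = gid
            return
        # Interior: recurse into each child the grid overlaps.
        if gle[node.split.dim] < node.split.pos:
            add_grid_py(node.left, gle, gre, gid)
        if gre[node.split.dim] > node.split.pos:
            add_grid_py(node.right, gle, gre, gid)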


https://bitbucket.org/yt_analysis/yt-3.0/commits/4f019273c7c5/
Changeset:   4f019273c7c5
Branch:      yt
User:        samskillman
Date:        2013-06-05 22:02:17
Summary:     Updating the tests, and now they pass!  This does change the call
signature for traversing a tree, but that is not part of the public API, so I
think this is OK.  No one should be accessing it directly.
Affected #:  2 files

diff -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -140,19 +140,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid != -1:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):

diff -r 8587b273c9b851b2b0ca01601bd5d8813c9dd062 -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -24,7 +24,8 @@
 """
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.amr_kdtree.amr_kdtools import depth_traverse
+from yt.utilities.lib.amr_kdtools import depth_traverse, \
+        get_left_edge, get_right_edge
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -53,17 +54,19 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree):
+    for node in depth_traverse(kd.tree.trunk):
         if node.grid is None:
             continue
         grid = pf.h.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        tree_ok *= np.all(grid.LeftEdge <= node.left_edge)
-        tree_ok *= np.all(grid.RightEdge >= node.right_edge)
+        tree_ok *= np.all(grid.LeftEdge <= nle)
+        tree_ok *= np.all(grid.RightEdge >= nre)
         tree_ok *= np.all(dims > 0)
 
     yield assert_equal, True, tree_ok
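
The updated test recovers each node's integer cell indices within its
grid; because node edges always fall on cell boundaries, the
rint-based division is exact. The same check as a standalone example,
using a hypothetical grid with 8 cells per side:

    import numpy as np

    gle = np.zeros(3)                 # grid left edge
    dds = np.full(3, 1.0 / 8.0)       # cell width
    nle = np.array([0.25, 0.0, 0.5])  # node left edge
    nre = np.array([0.75, 1.0, 1.0])  # node right edge

    li = np.rint((nle - gle) / dds).astype('int32')
    ri = np.rint((nre - gle) / dds).astype('int32')
    dims = (ri - li).astype('int32')
    assert np.all(dims > 0)
    print(li, ri, dims)  # [2 0 4] [6 8 8] [4 8 4]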


https://bitbucket.org/yt_analysis/yt-3.0/commits/1833903a59d2/
Changeset:   1833903a59d2
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:45:38
Summary:     Clean up the old tools file, removing all the stuff that got moved to
Cython, and give it a PEP talk (i.e., a PEP 8 cleanup).
Affected #:  1 file

diff -r 4f019273c7c518fe90b5b482d7341e6e5260cea0 -r 1833903a59d29bddfdb79af67c209763db45221e yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,383 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-from yt.utilities.lib.amr_kdtools import kd_is_leaf
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-# def viewpoint_traverse(tree, viewpoint):
-#     '''
-#     Yields a viewpoint dependent traversal of the kd-tree.  Starts
-#     with nodes furthest away from viewpoint.
-#     '''
-# 
-#     current = tree.trunk
-#     previous = None
-#     while current is not None:
-#         yield current
-#         current, previous = step_viewpoint(current, previous, viewpoint)
-# 
-# def step_viewpoint(current, previous, viewpoint):
-#     '''
-#     Takes a single step in the viewpoint based traversal.  Always
-#     goes to the node furthest away from viewpoint first.
-#     '''
-#     if kd_is_leaf(current): # At a leaf, move back up
-#         previous = current
-#         current = current.parent
-#     elif current.split.dim is None: # This is a dead node
-#         previous = current
-#         current = current.parent
-# 
-#     elif current.parent is previous: # Moving down
-#         previous = current
-#         if viewpoint[current.split.dim] <= current.split.pos:
-#             if current.right is not None:
-#                 current = current.right
-#             else:
-#                 previous = current.right
-#         else:
-#             if current.left is not None:
-#                 current = current.left
-#             else:
-#                 previous = current.left
-# 
-#     elif current.right is previous: # Moving up from right 
-#         previous = current
-#         if viewpoint[current.split.dim] <= current.split.pos:
-#             if current.left is not None:
-#                 current = current.left
-#             else:
-#                 current = current.parent
-#         else:
-#             current = current.parent
-# 
-#     elif current.left is previous: # Moving up from left child
-#         previous = current
-#         if viewpoint[current.split.dim] > current.split.pos:
-#             if current.right is not None:
-#                 current = current.right
-#             else:
-#                 current = current.parent
-#         else:
-#             current = current.parent
-# 
-#     return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -418,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-
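
receive_and_reduce, which survives this cleanup, is a back-to-front
"over" composite: the back image is attenuated by the front's
transmissivity ta = 1 - front_alpha, then the front is added on top.
The same arithmetic as a standalone NumPy sketch:

    import numpy as np

    def composite_over(front, back):
        # RGBA images, alpha in channel 3; computes
        # image[:, :, i] = front[:, :, i] + ta * back[:, :, i]
        ta = np.maximum(1.0 - front[:, :, 3], 0.0)
        image = back.copy()
        for i in range(4):
            image[:, :, i] *= ta
        image += front
        return image

    front = np.zeros((2, 2, 4))
    front[..., 0] = front[..., 3] = 0.5       # half-opaque red
    back = np.ones((2, 2, 4))                 # opaque white
    print(composite_over(front, back)[0, 0])  # [1.  0.5 0.5 1. ]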


https://bitbucket.org/yt_analysis/yt-3.0/commits/e6a87c284bc7/
Changeset:   e6a87c284bc7
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:47:29
Summary:     So I had broken parallel rendering. Now it is unbroken. Added helper
functions to get split information, since the split is now a C struct.
Affected #:  2 files

diff -r 1833903a59d29bddfdb79af67c209763db45221e -r e6a87c284bc75ac8440712b88c2623602ff09527 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -27,9 +27,9 @@
 import numpy as np
 import h5py
 from amr_kdtools import \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch
-from yt.utilities.lib.amr_kdtools import Node, add_pygrids, \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
         kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
@@ -233,13 +233,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -250,17 +250,18 @@
         node = self.get_node(nprocs + myrank)
 
         while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
+            if owners[node.parent.node_id] == myrank:
+                split_dim = node.parent.get_split_dim()
+                split_pos = node.parent.get_split_pos()
+                left_in_front = viewpoint[split_dim] < split_pos
                 #add_to_front = (left_in_front == (node == node.parent.right))
                 add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
+                image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
                                   image, add_to_front)
-                if node.parent.id == 1: break
+                if node.parent.node_id == 1: break
                 else: node = node.parent
             else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
+                send_to_parent(self.comm, owners[node.parent.node_id], image)
                 break
         image = scatter_image(self.comm, owners[1], image)
         return image
@@ -407,7 +408,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -428,7 +429,7 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
+                i = node.node_id
                 if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
@@ -479,31 +480,27 @@
         splitdims = []
         splitposs = []
         for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
+            nids.append(node.node_id) 
             les.append(node.left_edge) 
             res.append(node.right_edge) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -532,7 +529,7 @@
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

diff -r 1833903a59d29bddfdb79af67c209763db45221e -r e6a87c284bc75ac8440712b88c2623602ff09527 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -1,14 +1,33 @@
+"""
+AMR kD-Tree Cython Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
 import numpy as np
 cimport numpy as np
 cimport cython
-from libc.stdlib cimport malloc, free, abs
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
-from field_interpolation_tables cimport \
-    FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\
-    FIT_eval_transfer_with_light
-from fixed_interpolator cimport *
-
-from cython.parallel import prange, parallel, threadid
+from libc.stdlib cimport malloc, free
 
 cdef extern from "stdlib.h":
     # NOTE that size_t might not be int
@@ -30,7 +49,7 @@
     cdef readonly Node right
     cdef readonly Node parent
     cdef readonly int grid
-    cdef readonly int node_id
+    cdef readonly long node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -43,7 +62,7 @@
                   np.ndarray[np.float64_t, ndim=1] left_edge,
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   int grid,
-                  int node_id):
+                  long node_id):
         self.left = left
         self.right = right
         self.parent = parent
@@ -62,6 +81,23 @@
                                    self.right_edge[2])
         print '\t grid: %i' % self.grid
 
+    def get_split_dim(self):
+        try: 
+            return self.split.dim
+        except:
+            return -1
+    
+    def get_split_pos(self):
+        try: 
+            return self.split.pos
+        except:
+            return np.nan
+
+    def create_split(self, dim, pos):
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = dim 
+        split.pos = pos
+        self.split = split
 
 def get_left_edge(Node node):
     le = np.empty(3, dtype='float64')
@@ -78,19 +114,19 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _lchild_id(int node_id):
+cdef long _lchild_id(long node_id):
     return (node_id<<1)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _rchild_id(int node_id):
+cdef long _rchild_id(long node_id):
     return (node_id<<1) + 1
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-def _parent_id(int node_id):
+cdef long _parent_id(long node_id):
     return (node_id-1) >> 1
 
 @cython.boundscheck(False)
@@ -174,9 +210,10 @@
         return
 
     # If we should continue to split based on parallelism, do so!
-    # if should_i_split(node, rank, size):
-    #     geo_split(node, gle, gre, grid_id, rank, size)
-    #     return
+    if should_i_split(node, rank, size):
+        geo_split(node, gle, gre, grid_id, rank, size)
+        return
+
     cdef int contained = 1
     for i in range(3):
         if gle[i] > node.left_edge[i] or\
@@ -325,9 +362,9 @@
 
     if ngrids == 1:
         # If we should continue to split based on parallelism, do so!
-        #if should_i_split(node, rank, size):
-        #    geo_split(node, gles, gres, grid_ids, rank, size)
-        #    return
+        if should_i_split(node, rank, size):
+            geo_split(node, gles[0], gres[0], gids[0], rank, size)
+            return
 
         for i in range(3):
             contained *= gles[0][i] <= node.left_edge[i]
@@ -384,8 +421,6 @@
     split.dim = best_dim
     split.pos = split_pos
 
-    #del data
-
     # Create a Split
     divide(node, split)
 
@@ -570,57 +605,54 @@
 
     return 0
 
-# def geo_split_grid(node, gle, gre, grid_id, rank, size):
-#     big_dim = np.argmax(gre-gle)
-#     new_pos = (gre[big_dim] + gle[big_dim])/2.
-#     old_gre = gre.copy()
-#     new_gle = gle.copy()
-#     new_gle[big_dim] = new_pos
-#     gre[big_dim] = new_pos
-# 
-#     split = Split(big_dim, new_pos)
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     insert_grid(node.left, gle, gre,
-#                 grid_id, rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     insert_grid(node.right, new_gle, old_gre,
-#                 grid_id, rank, size)
-#     return
-# 
-# 
-# def geo_split(node, gles, gres, grid_ids, rank, size):
-#     big_dim = np.argmax(gres[0]-gles[0])
-#     new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-#     old_gre = gres[0].copy()
-#     new_gle = gles[0].copy()
-#     new_gle[big_dim] = new_pos
-#     gres[0][big_dim] = new_pos
-#     gles = np.append(gles, np.array([new_gle]), axis=0)
-#     gres = np.append(gres, np.array([old_gre]), axis=0)
-#     grid_ids = np.append(grid_ids, grid_ids, axis=0)
-# 
-#     split = Split(big_dim, new_pos)
-# 
-#     # Create a Split
-#     divide(node, split)
-# 
-#     # Populate Left Node
-#     #print 'Inserting left node', node.left_edge, node.right_edge
-#     insert_grids(node.left, gles[:1], gres[:1],
-#             grid_ids[:1], rank, size)
-# 
-#     # Populate Right Node
-#     #print 'Inserting right node', node.left_edge, node.right_edge
-#     insert_grids(node.right, gles[1:], gres[1:],
-#             grid_ids[1:], rank, size)
-#     return
+cdef geo_split(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int grid_id, 
+               int rank, 
+               int size):
+    cdef int big_dim = 0
+    cdef int i
+    cdef np.float64_t v, my_max = 0.0
+    
+    for i in range(3):
+        v = gre[i] - gle[i]
+        if v > my_max:
+            my_max = v
+            big_dim = i
+
+    new_pos = (gre[big_dim] + gle[big_dim])/2.
+    
+    lnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    for j in range(3):
+        lnew_gle[j] = gle[j]
+        lnew_gre[j] = gre[j]
+        rnew_gle[j] = gle[j]
+        rnew_gre[j] = gre[j]
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = big_dim 
+    split.pos = new_pos
+
+    # Create a Split
+    divide(node, split)
+
+    #lnew_gre[big_dim] = new_pos
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grid(node.left, lnew_gle, lnew_gre, 
+            grid_id, rank, size)
+
+    #rnew_gle[big_dim] = new_pos 
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grid(node.right, rnew_gle, rnew_gre,
+            grid_id, rank, size)
+    return
 
 cdef new_right(Node node, Split * split):
     new_right = Node.right_edge.copy()


https://bitbucket.org/yt_analysis/yt-3.0/commits/97dd5cf35207/
Changeset:   97dd5cf35207
Branch:      yt
User:        samskillman
Date:        2013-06-05 23:56:09
Summary:     Few more cleanup items. Now streamlines pass again.
Affected #:  4 files
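
The test touched below uses nose's generator-test pattern: each `yield func, arg, ...`
line becomes its own test case, so one loop over nprocs produces many independent
assertions. A minimal, self-contained sketch of that pattern (illustrative values,
not yt's actual fields):

    import numpy as np
    from numpy.testing import assert_equal

    def check_sums_to_one(arr):
        # A single assertion, run once per yielded case below.
        assert_equal(np.isclose(arr.sum(), 1.0), True)

    def test_paths():
        # nose collects each yielded (function, args...) tuple as a test.
        for n in (2, 4, 8):
            path_dts = np.full(n, 1.0 / n)   # stand-in for path['dts']
            yield check_sums_to_one, path_dts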

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -13,10 +13,14 @@
     cs = np.array([a.ravel() for a in cs]).T
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
+        print nprocs
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
         streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
+        print 'I did it.'
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
+            print path['dts'].sum()
+            print np.all(path['t'] <= (1.0 + 1e-10))
             path["Density"]

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -30,7 +30,8 @@
         receive_and_reduce, send_to_parent, scatter_image
 
 from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
-        kd_is_leaf, depth_traverse, viewpoint_traverse, kd_traverse, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
         get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
@@ -479,10 +480,10 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
+        for node in depth_first_touch(self.tree.trunk):
             nids.append(node.node_id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
@@ -517,14 +518,18 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -45,11 +45,11 @@
 @cython.cdivision(True)
 cdef class Node:
 
-    cdef readonly Node left
-    cdef readonly Node right
-    cdef readonly Node parent
-    cdef readonly int grid
-    cdef readonly long node_id
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public long node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -72,6 +72,7 @@
             self.right_edge[i] = right_edge[i]
         self.grid = grid
         self.node_id = node_id
+        self.split == NULL
 
     def print_me(self):
         print 'Node %i' % self.node_id
@@ -82,12 +83,34 @@
         print '\t grid: %i' % self.grid
 
     def get_split_dim(self):
-        try: 
+        if self.split != NULL:
             return self.split.dim
-        except:
+        else:
             return -1
     
     def get_split_pos(self):
+        if self.split != NULL:
+            return self.split.pos
+        else:
+            return np.nan
+
+    def get_left_edge(self):
+        return get_left_edge(self)
+    
+    def get_right_edge(self):
+        return get_right_edge(self)
+
+    def set_left_edge(self, np.ndarray[np.float64_t, ndim=1] left_edge):
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+    
+    def set_right_edge(self, np.ndarray[np.float64_t, ndim=1] right_edge):
+        cdef int i
+        for i in range(3):
+            self.right_edge[i] = right_edge[i]
+    
+    def get_split_pos(self):
         try: 
             return self.split.pos
         except:
@@ -654,16 +677,6 @@
             grid_id, rank, size)
     return
 
-cdef new_right(Node node, Split * split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-cdef new_left(Node node, Split * split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
 cdef void divide(Node node, Split * split):
     # Create a Split
     node.split = split
@@ -762,12 +775,12 @@
             current = current.parent
             previous = current.right
 
-def depth_first_touch(tree, max_node=None):
+def depth_first_touch(Node tree, max_node=None):
     '''
     Yields a depth-first traversal of the kd tree always going to
     the left child before the right.
     '''
-    current = tree.trunk
+    current = tree
     previous = None
     if max_node is None:
         max_node = np.inf
@@ -776,23 +789,23 @@
             yield current
         current, previous = step_depth(current, previous)
         if current is None: break
-        if current.id >= max_node:
+        if current.node_id >= max_node:
             current = current.parent
             previous = current.right
 
-def breadth_traverse(tree):
+def breadth_traverse(Node tree):
     '''
     Yields a breadth-first traversal of the kd tree always going to
     the left child before the right.
     '''
-    current = tree.trunk
+    current = tree
     previous = None
     while current is not None:
         yield current
         current, previous = step_depth(current, previous)
 
 
-def viewpoint_traverse(tree, viewpoint):
+def viewpoint_traverse(Node tree, viewpoint):
     '''
     Yields a viewpoint dependent traversal of the kd-tree.  Starts
     with nodes furthest away from viewpoint.

diff -r e6a87c284bc75ac8440712b88c2623602ff09527 -r 97dd5cf35207eb4420277f9000a0a01165156048 yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -169,8 +169,8 @@
                    np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if np.any(stream[-step+1,:] < node.left_edge) | \
-                   np.any(stream[-step+1,:] >= node.right_edge):
+            if np.any(stream[-step+1,:] < node.get_left_edge()) | \
+                   np.any(stream[-step+1,:] >= node.get_right_edge()):
                 return step-1
             step -= 1
         return step


https://bitbucket.org/yt_analysis/yt-3.0/commits/de67793f831e/
Changeset:   de67793f831e
Branch:      yt
User:        samskillman
Date:        2013-06-06 18:22:35
Summary:     Addressing a few comments. Removed print statements. Changed image reduction to
a while loop with an else, which is pretty sweet. Removed an import * in __init__.
Affected #:  3 files
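
For reference, the construct this summary refers to: Python runs a `while` loop's
`else` suite only when the loop exits because its condition became false; leaving
via `break` skips it. That is exactly what the reduction wants: climb the tree while
this rank owns the parent, and fall through to send_to_parent the moment it does
not. A minimal sketch of the control flow (toy values, not the actual MPI calls):

    def reduce_demo(i_own_parent, at_root):
        while i_own_parent:
            if at_root:
                break          # reached the root: stop; the else is skipped
            at_root = True     # pretend one climb reaches the root
        else:
            # runs only when the while condition was false (not via break)
            print("not the owner: send image to parent")

    reduce_demo(True, False)   # climbs, breaks at the root: prints nothing
    reduce_demo(False, False)  # never owns the parent: prints the message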

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/data_objects/tests/test_streamlines.py
--- a/yt/data_objects/tests/test_streamlines.py
+++ b/yt/data_objects/tests/test_streamlines.py
@@ -13,14 +13,10 @@
     cs = np.array([a.ravel() for a in cs]).T
     length = (1.0/128) * 16 # 16 half-widths of a cell
     for nprocs in [1, 2, 4, 8]:
-        print nprocs
         pf = fake_random_pf(64, nprocs = nprocs, fields = _fields)
         streams = Streamlines(pf, cs, length=length)
         streams.integrate_through_volume()
-        print 'I did it.'
         for path in (streams.path(i) for i in range(8)):
             yield assert_rel_equal, path['dts'].sum(), 1.0, 14
             yield assert_equal, np.all(path['t'] <= (1.0 + 1e-10)), True
-            print path['dts'].sum()
-            print np.all(path['t'] <= (1.0 + 1e-10))
             path["Density"]

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -250,20 +250,19 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.node_id] == myrank:
-                split_dim = node.parent.get_split_dim()
-                split_pos = node.parent.get_split_pos()
-                left_in_front = viewpoint[split_dim] < split_pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
-                                  image, add_to_front)
-                if node.parent.node_id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.node_id], image)
-                break
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            left_in_front = viewpoint[split_dim] < split_pos
+            #add_to_front = (left_in_front == (node == node.parent.right))
+            add_to_front = not left_in_front
+            image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
+                              image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
         image = scatter_image(self.comm, owners[1], image)
         return image
 

diff -r 97dd5cf35207eb4420277f9000a0a01165156048 -r de67793f831e5d77f48b6a0730c3939fd03ec94c yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -40,4 +40,3 @@
 from .marching_cubes import *
 from .GridTree import *
 from .write_array import *
-from .amr_kdtools import *


https://bitbucket.org/yt_analysis/yt-3.0/commits/e6acf09c0c9f/
Changeset:   e6acf09c0c9f
Branch:      yt
User:        samskillman
Date:        2013-06-06 19:04:35
Summary:     Simplifying some of the reduction logic, fixing parallel perspective renders.
The scattered image was not being returned from finalize_image correctly.
Affected #:  2 files
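
The bug fixed here is a common one: rebinding a parameter inside a function (here,
assigning the result of scatter_image to a local name inside finalize_image) does
not change what the caller sees; the new array has to be returned and captured. A
generic sketch of the failure mode and the fix:

    import numpy as np

    def finalize_no_return(image):
        image = image * 0.5        # rebinds the local name only; the
                                   # caller's array is untouched

    def finalize_with_return(image):
        image = image * 0.5
        return image               # caller must capture the new array

    img = np.ones(4)
    finalize_no_return(img)
    print(img)                     # [1. 1. 1. 1.] -- unchanged
    img = finalize_with_return(img)
    print(img)                     # [0.5 0.5 0.5 0.5]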

diff -r de67793f831e5d77f48b6a0730c3939fd03ec94c -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -253,18 +253,16 @@
         while owners[node.parent.node_id] == myrank:
             split_dim = node.parent.get_split_dim()
             split_pos = node.parent.get_split_pos()
-            left_in_front = viewpoint[split_dim] < split_pos
-            #add_to_front = (left_in_front == (node == node.parent.right))
-            add_to_front = not left_in_front
-            image = receive_and_reduce(self.comm, owners[node.parent.right.node_id],
-                              image, add_to_front)
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
             if node.parent.node_id == 1: break
             else: node = node.parent
         else:
             send_to_parent(self.comm, owners[node.parent.node_id], image)
 
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data

diff -r de67793f831e5d77f48b6a0730c3939fd03ec94c -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1074,19 +1074,22 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center
         for brick in self.volume.traverse(self.front_center):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
-        image = sampler.aimage
-        self.finalize_image(image)
+        image = self.finalize_image(sampler.aimage)
         return image
 
     def finalize_image(self, image):
+        view_pos = self.front_center
         image.shape = self.resolution[0], self.resolution[0], 4
+        image = self.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:,:,3]=1.0
+        return image
 
 def corners(left_edge, right_edge):
     return np.array([


https://bitbucket.org/yt_analysis/yt-3.0/commits/ea98c7a011e2/
Changeset:   ea98c7a011e2
Branch:      yt
User:        samskillman
Date:        2013-06-06 20:51:24
Summary:     long -> np.int64_t, malloc -> alloca. Now has the memory of a fish.
Affected #:  1 file
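
Unlike malloc, alloca carves the buffer out of the calling function's stack frame,
so it is released automatically when the function returns (hence "memory of a
fish") and never needs an explicit free. A hedged Cython sketch of the idiom,
assuming the platform exposes alloca via alloca.h (illustrative, not the yt build
configuration):

    # Illustrative .pyx fragment: stack allocation with alloca.
    cdef extern from "alloca.h":
        void *alloca(size_t size)

    cdef double edge_span(double le, double re):
        # Valid only while this frame is live; no free() required.
        cdef double *buf = <double *> alloca(3 * sizeof(double))
        cdef int i
        for i in range(3):
            buf[i] = re - le
        return buf[0]     # copy the value out before the frame dies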

diff -r e6acf09c0c9f9b46153862a05874f2a5b977ede5 -r ea98c7a011e2eaa8d7943987c51c727f7025f6b0 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -49,7 +49,7 @@
     cdef public Node right
     cdef public Node parent
     cdef public int grid
-    cdef public long node_id
+    cdef public np.int64_t node_id
     cdef np.float64_t left_edge[3]
     cdef np.float64_t right_edge[3]
     cdef public data
@@ -62,7 +62,7 @@
                   np.ndarray[np.float64_t, ndim=1] left_edge,
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   int grid,
-                  long node_id):
+                  np.int64_t node_id):
         self.left = left
         self.right = right
         self.parent = parent
@@ -109,12 +109,6 @@
         cdef int i
         for i in range(3):
             self.right_edge[i] = right_edge[i]
-    
-    def get_split_pos(self):
-        try: 
-            return self.split.pos
-        except:
-            return np.nan
 
     def create_split(self, dim, pos):
         split = <Split *> malloc(sizeof(Split))
@@ -122,6 +116,9 @@
         split.pos = pos
         self.split = split
 
+    def __dealloc__(self):
+        if self.split != NULL: free(self.split)
+
 def get_left_edge(Node node):
     le = np.empty(3, dtype='float64')
     for i in range(3):
@@ -137,19 +134,19 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _lchild_id(long node_id):
+cdef inline np.int64_t _lchild_id(np.int64_t node_id):
     return (node_id<<1)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _rchild_id(long node_id):
+cdef inline np.int64_t _rchild_id(np.int64_t node_id):
     return (node_id<<1) + 1
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
-cdef long _parent_id(long node_id):
+cdef inline np.int64_t _parent_id(np.int64_t node_id):
     return (node_id-1) >> 1
 
 @cython.boundscheck(False)
@@ -211,8 +208,8 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    pgres = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
     cdef int j
     for j in range(3):
         pgles[j] = gle[j]
@@ -270,12 +267,12 @@
     The entire purpose of this function is to move everything from ndarrays
     to internal C pointers. 
     """
-    pgles = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
-    pgres = <np.float64_t **> malloc(ngrids * sizeof(np.float64_t*))
-    pgids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
     for i in range(ngrids):
-        pgles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        pgres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         pgids[i] = gids[i]
         for j in range(3):
             pgles[i][j] = gles[i, j]
@@ -304,8 +301,8 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
-    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    less_ids= <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -321,19 +318,19 @@
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -418,16 +415,16 @@
                int size):
 
     cdef int j
-    data = <np.float64_t ***> malloc(sizeof(np.float64_t**))
-    data[0] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+    data = <np.float64_t ***> alloca(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
     for j in range(2):
         data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
     for j in range(3):
         data[0][0][j] = gle[j]
         data[0][1][j] = gre[j]
 
-    less_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
-    greater_ids = <np.uint8_t *> malloc(1 * sizeof(np.uint8_t))
+    less_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
 
     best_dim, split_pos, nless, ngreater = \
         kdtree_get_choices(1, data, node.left_edge, node.right_edge,
@@ -540,17 +537,17 @@
     # Find a Split
     cdef int i, j, k
 
-    data = <np.float64_t ***> malloc(ngrids * sizeof(np.float64_t**))
+    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
     for i in range(ngrids):
-        data[i] = <np.float64_t **> malloc(2 * sizeof(np.float64_t*))
+        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
         for j in range(2):
             data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
         for j in range(3):
             data[i][0][j] = gles[i][j]
             data[i][1][j] = gres[i][j]
 
-    less_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
-    greater_ids = <np.uint8_t *> malloc(ngrids * sizeof(np.uint8_t))
+    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
 
     best_dim, split_pos, nless, ngreater = \
         kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
@@ -571,8 +568,8 @@
     # Create a Split
     divide(node, split)
 
-    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
-    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    less_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -585,19 +582,19 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -646,10 +643,10 @@
 
     new_pos = (gre[big_dim] + gle[big_dim])/2.
     
-    lnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    lnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    rnew_gle = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
-    rnew_gre = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+    lnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
 
     for j in range(3):
         lnew_gle[j] = gle[j]


https://bitbucket.org/yt_analysis/yt-3.0/commits/1ae43f835321/
Changeset:   1ae43f835321
Branch:      yt
User:        samskillman
Date:        2013-06-06 22:08:32
Summary:     alloca was actually not working as I expected, so I reverted to malloc. Also
fixing should_i_split for a corner case where the node_id overflows a signed 64-bit
integer (exceeds 2^63 and wraps negative). Not a great fix.
Affected #:  1 file
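
The catch with alloca, and the likely reason it "was not working as expected": a
stack buffer dies with the frame that allocated it, so any pointer handed to the
recursive insert/add calls, or stashed on a node, dangles after the allocating
function returns. Heap allocation with paired free calls, as this changeset does,
is the safe pattern. A hedged Cython sketch of the distinction:

    # Illustrative .pyx fragment: why these buffers must be heap-allocated.
    from libc.stdlib cimport malloc, free

    cdef double *make_edges(double le, double re):
        # Heap memory survives the return; the caller owns it and must
        # free() it. With alloca the returned pointer would dangle as
        # soon as this frame is reclaimed.
        cdef double *buf = <double *> malloc(3 * sizeof(double))
        cdef int i
        for i in range(3):
            buf[i] = re - le
        return buf

    cdef void demo():
        cdef double *edges = make_edges(0.0, 1.0)
        # ... pass edges into recursive build calls ...
        free(edges)       # paired free, as added in this changeset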

diff -r ea98c7a011e2eaa8d7943987c51c727f7025f6b0 -r 1ae43f8353213e5e3120293feab9ded8e084c594 yt/utilities/lib/amr_kdtools.pyx
--- a/yt/utilities/lib/amr_kdtools.pyx
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -301,8 +301,8 @@
         insert_grids(node, ngrids, gles, gres, gids, rank, size)
         return
 
-    less_ids= <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
-    greater_ids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -318,19 +318,19 @@
     #print 'nless: %i' % nless
     #print 'ngreater: %i' % ngreater
 
-    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -354,13 +354,29 @@
     if ngreater > 0:
         add_grids(node.right, ngreater, greater_gles, greater_gres,
                   g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_ids)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_ids)
+    free(greater_gles)
+    free(greater_gres)
+
     return
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
 cdef int should_i_split(Node node, int rank, int size):
-    if node.node_id < size:
+    if node.node_id < size and node.node_id > 0:
         return 1
     return 0
 
@@ -568,8 +584,8 @@
     # Create a Split
     divide(node, split)
 
-    less_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
-    greater_index = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
    
     nless = 0
     ngreater = 0
@@ -582,19 +598,19 @@
             greater_index[ngreater] = i
             ngreater += 1
 
-    less_gles = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    less_gres = <np.float64_t **> alloca(nless * sizeof(np.float64_t*))
-    l_ids = <np.int64_t *> alloca(nless * sizeof(np.int64_t))
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
     for i in range(nless):
-        less_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        less_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
-    greater_gles = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    greater_gres = <np.float64_t **> alloca(ngreater * sizeof(np.float64_t*))
-    g_ids = <np.int64_t *> alloca(ngreater * sizeof(np.int64_t))
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
     for i in range(ngreater):
-        greater_gles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
-        greater_gres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
 
     cdef int index
     for i in range(nless):
@@ -623,6 +639,22 @@
         insert_grids(node.right, ngreater, greater_gles, greater_gres,
                      g_ids, rank, size)
 
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_index)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_index)
+    free(greater_gles)
+    free(greater_gres)
+
+
     return 0
 
 cdef geo_split(Node node, 


https://bitbucket.org/yt_analysis/yt-3.0/commits/55ba8471e0c3/
Changeset:   55ba8471e0c3
Branch:      yt
User:        samskillman
Date:        2013-06-13 20:19:26
Summary:     Merging and resolving simple conflict
Affected #:  19 files
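
Among the merged changes below is a top-level run_nose helper (yt/__init__.py).
Assuming that signature, invoking the test suite from an interpreter would look
like:

    import yt
    # Keyword arguments as defined in the merged yt/__init__.py below.
    yt.run_nose(verbose=True, run_answer_tests=False, answer_big_data=False)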

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -143,10 +143,10 @@
             return self.CoM
         pm = self["ParticleMassMsun"]
         c = {}
-        c[0] = self["particle_position_x"]
-        c[1] = self["particle_position_y"]
-        c[2] = self["particle_position_z"]
-        c_vec = np.zeros(3)
+        # We shift into a box where the origin is the left edge
+        c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
+        c[1] = self["particle_position_y"] - self.pf.domain_left_edge[1]
+        c[2] = self["particle_position_z"] - self.pf.domain_left_edge[2]
         com = []
         for i in range(3):
             # A halo is likely periodic around a boundary if the distance 
@@ -159,13 +159,12 @@
                 com.append(c[i])
                 continue
             # Now we want to flip around only those close to the left boundary.
-            d_left = c[i] - self.pf.domain_left_edge[i]
-            sel = (d_left <= (self.pf.domain_width[i]/2))
+            sel = (c[i] <= (self.pf.domain_width[i]/2))
             c[i][sel] += self.pf.domain_width[i]
             com.append(c[i])
         com = np.array(com)
         c = (com * pm).sum(axis=1) / pm.sum()
-        return c%self.pf.domain_width
+        return c%self.pf.domain_width + self.pf.domain_left_edge
 
     def maximum_density(self):
         r"""Return the HOP-identified maximum density. Not applicable to

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -212,7 +212,7 @@
             dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
             if i == (self.num_sigma_bins - 3): break
 
-        self.dis = dis  / self.pf['CosmologyComovingBoxSize']**3.0 * self.hubble0**3.0
+        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
 
     def sigmaM(self):
         """

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -77,6 +77,15 @@
         parses the Orion Star Particle text files
              
         """
+
+        fn = grid.pf.fullplotdir[:-4] + "sink"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -87,15 +96,38 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
 
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15,
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Warning - could not figure out particle output file')
+            mylog.warning('These results could be nonsense!')
+            
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
         fn = grid.pf.fullplotdir[:-4] + "sink"
         with open(fn, 'r') as f:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -163,10 +163,12 @@
                         "angmomen_y",
                         "angmomen_z",
                         "mlast",
+                        "r",
                         "mdeut",
                         "n",
                         "mdot",
                         "burnstate",
+                        "luminosity",
                         "id"]
 
 for pf in _particle_field_list:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -44,6 +44,17 @@
         parses the Orion Star Particle text files
         
         """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
         index = {'particle_mass': 0,
                  'particle_position_x': 1,
                  'particle_position_y': 2,
@@ -54,17 +65,39 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
+                 'particle_id': -1}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14,
+            index['particle_burnstate'] = 15,
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Warning - could not figure out particle output file')
+            mylog.warning('These results could be nonsense!')
 
         def read(line, field):
-            return float(line.split(' ')[index[field]])
+            return float(line.strip().split(' ')[index[field]])
 
-        fn = grid.pf.fullplotdir + "/StarParticles"
         with open(fn, 'r') as f:
             lines = f.readlines()
             particles = []

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -21,4 +21,9 @@
     config.add_subpackage("castro")
     config.add_subpackage("stream")
     config.add_subpackage("pluto")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -369,6 +369,20 @@
     if ytcfg.getint("yt", cfg_option) > 0: return
     return func(*args, **kwargs)
 
+def is_root():
+    """
+    This function returns True if it is on the root processor of the
+    topcomm and False otherwise.
+    """
+    from yt.config import ytcfg
+    cfg_option = "__topcomm_parallel_rank"
+    if not ytcfg.getboolean("yt","__parallel"):
+        return True
+    if ytcfg.getint("yt", cfg_option) > 0: 
+        return False
+    return True
+
+
 #
 # Our signal and traceback handling functions
 #

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -252,6 +252,7 @@
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":
         gpd = os.environ["GPERFTOOLS"]

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -257,7 +257,7 @@
             try:
                 rv = func(*args, **kwargs)
                 all_clear = 1
-            except:
+            except Exception as ex:
                 traceback.print_last()
                 all_clear = 0
         else:

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/parallel_tools/task_queue.py
--- a/yt/utilities/parallel_tools/task_queue.py
+++ b/yt/utilities/parallel_tools/task_queue.py
@@ -133,14 +133,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))
@@ -161,14 +161,14 @@
     comm = _get_comm(())
     if not parallel_capable:
         mylog.error("Cannot create task queue for serial process.")
-        raise RunTimeError
+        raise RuntimeError
     my_size = comm.comm.size
     if njobs <= 0:
         njobs = my_size - 1
     if njobs >= my_size:
         mylog.error("You have asked for %s jobs, but only %s processors are available.",
                     njobs, (my_size - 1))
-        raise RunTimeError
+        raise RuntimeError
     my_rank = comm.rank
     all_new_comms = np.array_split(np.arange(1, my_size), njobs)
     all_new_comms.insert(0, np.array([0]))

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -55,7 +55,7 @@
     traceback.print_exception(exc_type, exc, tb)
     task = ytcfg.getint("yt", "__global_parallel_rank")
     size = ytcfg.getint("yt", "__global_parallel_size")
-    print "Starting RPDB server on task %s ; connect with 'yt rpdb %s'" \
+    print "Starting RPDB server on task %s ; connect with 'yt rpdb -t %s'" \
             % (task,task)
     handler = pdb_handler(tb)
     server = PdbXMLRPCServer(("localhost", 8010+task))

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -56,6 +56,7 @@
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
                 "yt/utilities/data_point_utilities.c", libraries=["m"])
+    config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -717,7 +717,7 @@
             cbname = callback_registry[key]._type_name
             CallbackMaker = callback_registry[key]
             callback = invalidate_plot(apply_callback(CallbackMaker))
-            callback.__doc__ = CallbackMaker.__init__.__doc__
+            callback.__doc__ = CallbackMaker.__doc__
             self.__dict__['annotate_'+cbname] = types.MethodType(callback,self)
 
     @invalidate_plot

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/setup.py
--- a/yt/visualization/setup.py
+++ b/yt/visualization/setup.py
@@ -7,6 +7,7 @@
     config = Configuration('visualization', parent_package, top_path)
     config.add_subpackage("image_panner")
     config.add_subpackage("volume_rendering")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     config.add_extension("_MPL", "_MPL.c", libraries=["m"])

diff -r 1ae43f8353213e5e3120293feab9ded8e084c594 -r 55ba8471e0c33bab5934ebe4b567680d878fb471 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1110,11 +1110,13 @@
     def __init__(self, center, radius, nside,
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
-                 pf = None, use_kd=True, no_ghost=False, use_light=False):
+                 pf = None, use_kd=True, no_ghost=False, use_light=False,
+                 inner_radius = 10):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
         self.center = np.array(center, dtype='float64')
         self.radius = radius
+        self.inner_radius = inner_radius
         self.nside = nside
         self.use_kd = use_kd
         if transfer_function is None:
@@ -1122,9 +1124,11 @@
         self.transfer_function = transfer_function
 
         if isinstance(self.transfer_function, ProjectionTransferFunction):
-            self._sampler_object = ProjectionSampler
+            self._sampler_object = InterpolatedProjectionSampler
+            self._needs_tf = 0
         else:
             self._sampler_object = VolumeRenderSampler
+            self._needs_tf = 1
 
         if fields is None: fields = ["Density"]
         self.fields = fields
@@ -1148,15 +1152,20 @@
     def get_sampler_args(self, image):
         nv = 12 * self.nside ** 2
         vs = arr_pix2vec_nest(self.nside, np.arange(nv))
-        vs *= self.radius
-        vs.shape = nv, 1, 3
+        vs.shape = (nv, 1, 3)
+        vs += 1e-8
         uv = np.ones(3, dtype='float64')
         positions = np.ones((nv, 1, 3), dtype='float64') * self.center
+        dx = min(g.dds.min() for g in self.pf.h.find_point(self.center)[0])
+        positions += self.inner_radius * dx * vs
+        vs *= self.radius
         args = (positions, vs, self.center,
                 (0.0, 1.0, 0.0, 1.0),
                 image, uv, uv,
-                np.zeros(3, dtype='float64'),
-                self.transfer_function, self.sub_samples)
+                np.zeros(3, dtype='float64'))
+        if self._needs_tf:
+            args += (self.transfer_function,)
+        args += (self.sub_samples,)
         return args
 
     def _render(self, double_check, num_threads, image, sampler):
@@ -1231,28 +1240,14 @@
     def save_image(self, image, fn=None, clim=None, label = None):
         if self.comm.rank == 0 and fn is not None:
             # This assumes Density; this is a relatively safe assumption.
-            import matplotlib.figure
-            import matplotlib.backends.backend_agg
-            phi, theta = np.mgrid[0.0:2*np.pi:800j, 0:np.pi:800j]
-            pixi = arr_ang2pix_nest(self.nside, theta.ravel(), phi.ravel())
-            image *= self.radius * self.pf['cm']
-            img = np.log10(image[:,0,0][pixi]).reshape((800,800))
-
-            fig = matplotlib.figure.Figure((10, 5))
-            ax = fig.add_subplot(1,1,1,projection='hammer')
-            implot = ax.imshow(img, extent=(-np.pi,np.pi,-np.pi/2,np.pi/2), clip_on=False, aspect=0.5)
-            cb = fig.colorbar(implot, orientation='horizontal')
-
-            if label == None:
-                cb.set_label("Projected %s" % self.fields[0])
+            if label is None:
+                label = "Projected %s" % (self.fields[0])
+            if clim is not None:
+                cmin, cmax = clim
             else:
-                cb.set_label(label)
-            if clim is not None: cb.set_clim(*clim)
-            ax.xaxis.set_ticks(())
-            ax.yaxis.set_ticks(())
-            canvas = matplotlib.backends.backend_agg.FigureCanvasAgg(fig)
-            canvas.print_figure(fn)
-
+                cmin = cmax = None
+            plot_allsky_healpix(image[:,0,0], self.nside, fn, label, 
+                                cmin = cmin, cmax = cmax)
 
 class AdaptiveHEALpixCamera(Camera):
     def __init__(self, center, radius, nside,
@@ -2022,7 +2017,7 @@
     nv = 12*nside**2
     image = np.zeros((nv,1,4), dtype='float64', order='C')
     vs = arr_pix2vec_nest(nside, np.arange(nv))
-    vs.shape = (nv,1,3)
+    vs.shape = (nv, 1, 3)
     if rotation is not None:
         vs2 = vs.copy()
         for i in range(3):


https://bitbucket.org/yt_analysis/yt-3.0/commits/a5f16d327d7e/
Changeset:   a5f16d327d7e
Branch:      yt
User:        MatthewTurk
Date:        2013-07-01 16:01:22
Summary:     Merged in samskillman/yt (pull request #525)

Cythonize the AMRKDTree build
Affected #:  9 files
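
The pure-Python kd-tree tools removed below store the tree as an implicit binary
heap (left child 2n, right child 2n+1) and parallelize the build with the
ownership rule in should_i_build: every rank builds the shared top of the tree,
exactly one rank owns each subtree rooted at the process level, and deeper nodes
are only ever reached through an owned subtree. A compact restatement of that
rule, runnable on its own:

    def should_i_build(node_id, rank, size):
        # Shared top of the tree, or interior nodes reachable only
        # through a subtree this rank already owns.
        if node_id < size or node_id >= 2 * size:
            return True
        # Exactly one rank owns each node at the process level.
        return node_id - size == rank

    # With size=4 ranks, rank 2 owns the subtree rooted at node 6:
    print([n for n in range(4, 8) if should_i_build(n, 2, 4)])  # [6]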

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
-    has_l_child = node.left is None
-    has_r_child = node.right is None
-    assert has_l_child == has_r_child
-    return has_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-

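The compositing helpers kept in amr_kdtools.py implement standard front-to-back alpha blending: image[:,:,i] = front[:,:,i] + ta*back[:,:,i], where ta = max(1 - front_alpha, 0) is the transmission through the front image. A minimal NumPy sketch of the same calculation (array names are illustrative):

    import numpy as np

    def composite_front_to_back(front, back):
        # front, back: (Nx, Ny, 4) RGBA buffers; alpha is channel 3.
        ta = np.maximum(1.0 - front[:, :, 3], 0.0)  # transmission through front
        image = back.copy()
        for i in range(4):
            image[:, :, i] *= ta   # attenuate the back image
        image += front             # then add the front contribution
        return image
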
diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,13 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch, add_grid
+from amr_kdtools import \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -67,12 +70,11 @@
         self.comm_rank = comm_rank
         self.comm_size = comm_size
         self.trunk = Node(None, None, None,
-                left, right, None, 1)
+                left, right, -1, 1)
         if grids is None:
-            self.grids = pf.h.region((left+right)/2., left, right)._grids
-        else:
-            self.grids = grids
-        self.build(grids)
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
         lvl_range = range(self.min_level, self.max_level+1)
@@ -91,7 +93,8 @@
                     gles = np.array([g.LeftEdge for g in grids])[gmask]
                     gres = np.array([g.RightEdge for g in grids])[gmask]
                     gids = np.array([g.id for g in grids])[gmask]
-                    add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
                               self.comm_size)
                     grids_added += grids.size
                     del gles, gres, gids, grids
@@ -99,31 +102,35 @@
                     grids_added += grids.size
                     [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
                               self.comm_rank, self.comm_size) for g in grids]
-        else:
-            gles = np.array([g.LeftEdge for g in grids])
-            gres = np.array([g.RightEdge for g in grids])
-            gids = np.array([g.id for g in grids])
+            return
 
-            add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids, grids
+        for lvl in lvl_range:
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
 
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
 
-    def build(self, grids = None):
+
+    def build(self, grids=None):
         self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -134,19 +141,20 @@
 
     def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):
@@ -204,14 +212,8 @@
         self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -232,13 +234,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -248,33 +250,32 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
-                                  image, add_to_front)
-                if node.parent.id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
-                break
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
@@ -292,8 +293,8 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
         if not self._initialized: self.brick_dimensions.append(dims)
@@ -405,7 +406,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -426,8 +427,8 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
-                if node.grid is not None:
+                i = node.node_id
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
@@ -476,32 +477,28 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+        for node in depth_first_touch(self.tree.trunk):
+            nids.append(node.node_id) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -518,19 +515,23 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

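One subtlety in the rewritten reduce_tree_images above: it leans on Python's while/else. The else suite runs only when the loop's condition test fails (here, when this rank does not own the parent node and must send its image upward), and is skipped entirely when the loop exits via break on reaching the root. A toy illustration:

    n = 0
    while n < 3:
        if n == 2:
            break   # leaving via break skips the else suite
        n += 1
    else:
        print 'runs only when the while condition test fails'
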
diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/lib/amr_kdtools.pyx
--- /dev/null
+++ b/yt/utilities/lib/amr_kdtools.pyx
@@ -0,0 +1,921 @@
+"""
+AMR kD-Tree Cython Tools
+
+Authors: Samuel Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2013 Samuel Skillman.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+
+DEF Nch = 4
+
+cdef struct Split:
+    int dim
+    np.float64_t pos
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef class Node:
+
+    cdef public Node left
+    cdef public Node right
+    cdef public Node parent
+    cdef public int grid
+    cdef public np.int64_t node_id
+    cdef np.float64_t left_edge[3]
+    cdef np.float64_t right_edge[3]
+    cdef public data
+    cdef Split * split
+
+    def __cinit__(self, 
+                  Node parent, 
+                  Node left, 
+                  Node right, 
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  int grid,
+                  np.int64_t node_id):
+        self.left = left
+        self.right = right
+        self.parent = parent
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+            self.right_edge[i] = right_edge[i]
+        self.grid = grid
+        self.node_id = node_id
+        self.split = NULL  # assignment; a bare '==' comparison has no effect
+
+    def print_me(self):
+        print 'Node %i' % self.node_id
+        print '\t le: %e %e %e' % (self.left_edge[0], self.left_edge[1], 
+                                   self.left_edge[2])
+        print '\t re: %e %e %e' % (self.right_edge[0], self.right_edge[1], 
+                                   self.right_edge[2])
+        print '\t grid: %i' % self.grid
+
+    def get_split_dim(self):
+        if self.split != NULL:
+            return self.split.dim
+        else:
+            return -1
+    
+    def get_split_pos(self):
+        if self.split != NULL:
+            return self.split.pos
+        else:
+            return np.nan
+
+    def get_left_edge(self):
+        return get_left_edge(self)
+    
+    def get_right_edge(self):
+        return get_right_edge(self)
+
+    def set_left_edge(self, np.ndarray[np.float64_t, ndim=1] left_edge):
+        cdef int i
+        for i in range(3):
+            self.left_edge[i] = left_edge[i]
+    
+    def set_right_edge(self, np.ndarray[np.float64_t, ndim=1] right_edge):
+        cdef int i
+        for i in range(3):
+            self.right_edge[i] = right_edge[i]
+
+    def create_split(self, dim, pos):
+        split = <Split *> malloc(sizeof(Split))
+        split.dim = dim 
+        split.pos = pos
+        self.split = split
+
+    def __dealloc__(self):
+        if self.split != NULL: free(self.split)
+
+def get_left_edge(Node node):
+    le = np.empty(3, dtype='float64')
+    for i in range(3):
+        le[i] = node.left_edge[i]
+    return le
+
+def get_right_edge(Node node):
+    re = np.empty(3, dtype='float64')
+    for i in range(3):
+        re[i] = node.right_edge[i]
+    return re
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _lchild_id(np.int64_t node_id):
+    return (node_id<<1)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _rchild_id(np.int64_t node_id):
+    return (node_id<<1) + 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef inline np.int64_t _parent_id(np.int64_t node_id):
+    return (node_id-1) >> 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_build(Node node, int rank, int size):
+    if (node.node_id < size) or (node.node_id >= 2*size):
+        return 1 
+    elif node.node_id - size == rank:
+        return 1 
+    else:
+        return 0 
+
+def kd_traverse(Node trunk, viewpoint=None):
+    if viewpoint is None:
+        for node in depth_traverse(trunk):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+    else:
+        for node in viewpoint_traverse(trunk, viewpoint):
+            if kd_is_leaf(node) and node.grid != -1:
+                yield node
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grid(Node node, 
+                   np.float64_t *gle, 
+                   np.float64_t *gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grid(node, gle, gre, gid, rank, size)
+    else:
+        less_id = gle[node.split.dim] < node.split.pos
+        if less_id:
+            add_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+        greater_id = gre[node.split.dim] > node.split.pos
+        if greater_id:
+            add_grid(node.right, gle, gre,
+                     gid, rank, size)
+    return
+
+def add_pygrid(Node node, 
+                   np.ndarray[np.float64_t, ndim=1] gle, 
+                   np.ndarray[np.float64_t, ndim=1] gre, 
+                   int gid, 
+                   int rank,
+                   int size):
+
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    pgres = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    cdef int j
+    for j in range(3):
+        pgles[j] = gle[j]
+        pgres[j] = gre[j]
+
+    add_grid(node, pgles, pgres, gid, rank, size)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef insert_grid(Node node, 
+                np.float64_t *gle, 
+                np.float64_t *gre, 
+                int grid_id, 
+                int rank,
+                int size):
+    if not should_i_build(node, rank, size):
+        return
+
+    # If we should continue to split based on parallelism, do so!
+    if should_i_split(node, rank, size):
+        geo_split(node, gle, gre, grid_id, rank, size)
+        return
+
+    cdef int contained = 1
+    for i in range(3):
+        if gle[i] > node.left_edge[i] or\
+           gre[i] < node.right_edge[i]:
+            contained *= 0
+
+    if contained == 1:
+        node.grid = grid_id 
+        assert(node.grid != -1)
+        return
+
+    # Split the grid
+    cdef int check = split_grid(node, gle, gre, grid_id, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1 
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def add_pygrids(Node node, 
+                    int ngrids,
+                    np.ndarray[np.float64_t, ndim=2] gles, 
+                    np.ndarray[np.float64_t, ndim=2] gres, 
+                    np.ndarray[np.int64_t, ndim=1] gids, 
+                    int rank,
+                    int size):
+    """
+    The entire purpose of this function is to move everything from ndarrays
+    to internal C pointers. 
+    """
+    pgles = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgres = <np.float64_t **> alloca(ngrids * sizeof(np.float64_t*))
+    pgids = <np.int64_t *> alloca(ngrids * sizeof(np.int64_t))
+    for i in range(ngrids):
+        pgles[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgres[i] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        pgids[i] = gids[i]
+        for j in range(3):
+            pgles[i][j] = gles[i, j]
+            pgres[i][j] = gres[i, j]
+
+    add_grids(node, ngrids, pgles, pgres, pgids, rank, size)
+
+
+ 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef add_grids(Node node, 
+                    int ngrids,
+                    np.float64_t **gles, 
+                    np.float64_t **gres, 
+                    np.int64_t *gids, 
+                    int rank,
+                    int size):
+    cdef int i, j, nless, ngreater
+    cdef np.int64_t gid
+    if not should_i_build(node, rank, size):
+        return
+
+    if kd_is_leaf(node):
+        insert_grids(node, ngrids, gles, gres, gids, rank, size)
+        return
+
+    less_ids= <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_ids = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if gles[i][node.split.dim] < node.split.pos:
+            less_ids[nless] = i
+            nless += 1
+            
+        if gres[i][node.split.dim] > node.split.pos:
+            greater_ids[ngreater] = i
+            ngreater += 1
+
+    #print 'nless: %i' % nless
+    #print 'ngreater: %i' % ngreater
+
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_ids[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        add_grids(node.left, nless, less_gles, less_gres,
+                  l_ids, rank, size)
+            
+    for i in range(ngreater):
+        index = greater_ids[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        add_grids(node.right, ngreater, greater_gles, greater_gres,
+                  g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_ids)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_ids)
+    free(greater_gles)
+    free(greater_gres)
+
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef int should_i_split(Node node, int rank, int size):
+    if node.node_id < size and node.node_id > 0:
+        return 1
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void insert_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    
+    if not should_i_build(node, rank, size) or ngrids == 0:
+        return
+    cdef int contained = 1
+    cdef int check
+
+    if ngrids == 1:
+        # If we should continue to split based on parallelism, do so!
+        if should_i_split(node, rank, size):
+            geo_split(node, gles[0], gres[0], gids[0], rank, size)
+            return
+
+        for i in range(3):
+            contained *= gles[0][i] <= node.left_edge[i]
+            contained *= gres[0][i] >= node.right_edge[i]
+    
+        if contained == 1:
+            # print 'Node fully contained, setting to grid: %i' % gids[0]
+            node.grid = gids[0]
+            assert(node.grid != -1)
+            return
+
+    # Split the grids
+    check = split_grids(node, ngrids, gles, gres, gids, rank, size)
+    # If check is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if check == -1:
+        node.grid = -1
+    return
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef split_grid(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int gid, 
+               int rank,
+               int size):
+
+    cdef int j
+    data = <np.float64_t ***> alloca(sizeof(np.float64_t**))
+    data[0] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+    for j in range(2):
+        data[0][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    for j in range(3):
+        data[0][0][j] = gle[j]
+        data[0][1][j] = gre[j]
+
+    less_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(1 * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(1, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grid.'
+        return -1
+
+        
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    # Create a Split
+    divide(node, split)
+
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    if nless == 1:
+        insert_grid(node.left, gle, gre,
+                     gid, rank, size)
+
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    if ngreater == 1:
+        insert_grid(node.right, gle, gre,
+                     gid, rank, size)
+
+    return 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef kdtree_get_choices(int n_grids, 
+                        np.float64_t ***data,
+                        np.float64_t *l_corner,
+                        np.float64_t *r_corner,
+                        np.uint8_t *less_ids,
+                        np.uint8_t *greater_ids,
+                       ):
+    cdef int i, j, k, dim, n_unique, best_dim, n_best, addit, my_split
+    cdef np.float64_t **uniquedims, *uniques, split
+    uniquedims = <np.float64_t **> alloca(3 * sizeof(np.float64_t*))
+    for i in range(3):
+        uniquedims[i] = <np.float64_t *> \
+                alloca(2*n_grids * sizeof(np.float64_t))
+    my_max = 0
+    my_split = 0
+    best_dim = -1
+    for dim in range(3):
+        n_unique = 0
+        uniques = uniquedims[dim]
+        for i in range(n_grids):
+            # Check for disqualification
+            for j in range(2):
+                # print "Checking against", i,j,dim,data[i,j,dim]
+                if not (l_corner[dim] < data[i][j][dim] and
+                        data[i][j][dim] < r_corner[dim]):
+                    # print "Skipping ", data[i,j,dim], l_corner[dim], r_corner[dim]
+                    continue
+                skipit = 0
+                # Add our left ...
+                for k in range(n_unique):
+                    if uniques[k] == data[i][j][dim]:
+                        skipit = 1
+                        # print "Identified", uniques[k], data[i,j,dim], n_unique
+                        break
+                if skipit == 0:
+                    uniques[n_unique] = data[i][j][dim]
+                    n_unique += 1
+        if n_unique > my_max:
+            best_dim = dim
+            my_max = n_unique
+            my_split = (n_unique-1)/2
+    # I recognize how lame this is.
+    cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
+    for i in range(my_max):
+        # print "Setting tarr: ", i, uniquedims[best_dim][i]
+        tarr[i] = uniquedims[best_dim][i]
+    tarr.sort()
+    split = tarr[my_split]
+    cdef int nless=0, ngreater=0
+    for i in range(n_grids):
+        if data[i][0][best_dim] < split:
+            less_ids[i] = 1
+            nless += 1
+        else:
+            less_ids[i] = 0
+        if data[i][1][best_dim] > split:
+            greater_ids[i] = 1
+            ngreater += 1
+        else:
+            greater_ids[i] = 0
+    # Return out unique values
+    return best_dim, split, nless, ngreater
+
+#@cython.boundscheck(False)
+#@cython.wraparound(False)
+#@cython.cdivision(True)
+cdef int split_grids(Node node, 
+                       int ngrids,
+                       np.float64_t **gles, 
+                       np.float64_t **gres, 
+                       np.int64_t *gids, 
+                       int rank,
+                       int size):
+    # Find a Split
+    cdef int i, j, k
+
+    data = <np.float64_t ***> alloca(ngrids * sizeof(np.float64_t**))
+    for i in range(ngrids):
+        data[i] = <np.float64_t **> alloca(2 * sizeof(np.float64_t*))
+        for j in range(2):
+            data[i][j] = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+        for j in range(3):
+            data[i][0][j] = gles[i][j]
+            data[i][1][j] = gres[i][j]
+
+    less_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+    greater_ids = <np.uint8_t *> alloca(ngrids * sizeof(np.uint8_t))
+
+    best_dim, split_pos, nless, ngreater = \
+        kdtree_get_choices(ngrids, data, node.left_edge, node.right_edge,
+                          less_ids, greater_ids)
+ 
+    # If best_dim is -1, then we have found a place where there are no choices.
+    # Exit out and set the node to None.
+    if best_dim == -1:
+        print 'Failed to split grids.'
+        return -1
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = best_dim
+    split.pos = split_pos
+
+    #del data
+
+    # Create a Split
+    divide(node, split)
+
+    less_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+    greater_index = <np.int64_t *> malloc(ngrids * sizeof(np.int64_t))
+   
+    nless = 0
+    ngreater = 0
+    for i in range(ngrids):
+        if less_ids[i] == 1:
+            less_index[nless] = i
+            nless += 1
+
+        if greater_ids[i] == 1:
+            greater_index[ngreater] = i
+            ngreater += 1
+
+    less_gles = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    less_gres = <np.float64_t **> malloc(nless * sizeof(np.float64_t*))
+    l_ids = <np.int64_t *> malloc(nless * sizeof(np.int64_t))
+    for i in range(nless):
+        less_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        less_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    greater_gles = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    greater_gres = <np.float64_t **> malloc(ngreater * sizeof(np.float64_t*))
+    g_ids = <np.int64_t *> malloc(ngreater * sizeof(np.int64_t))
+    for i in range(ngreater):
+        greater_gles[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+        greater_gres[i] = <np.float64_t *> malloc(3 * sizeof(np.float64_t))
+
+    cdef int index
+    for i in range(nless):
+        index = less_index[i]
+        l_ids[i] = gids[index]
+        for j in range(3):
+            less_gles[i][j] = gles[index][j]
+            less_gres[i][j] = gres[index][j]
+
+    if nless > 0:
+        # Populate Left Node
+        #print 'Inserting left node', node.left_edge, node.right_edge
+        insert_grids(node.left, nless, less_gles, less_gres,
+                     l_ids, rank, size)
+
+    for i in range(ngreater):
+        index = greater_index[i]
+        g_ids[i] = gids[index]
+        for j in range(3):
+            greater_gles[i][j] = gles[index][j]
+            greater_gres[i][j] = gres[index][j]
+
+    if ngreater > 0:
+        # Populate Right Node
+        #print 'Inserting right node', node.left_edge, node.right_edge
+        insert_grids(node.right, ngreater, greater_gles, greater_gres,
+                     g_ids, rank, size)
+
+    for i in range(nless):
+        free(less_gles[i])
+        free(less_gres[i])
+    free(l_ids)
+    free(less_index)
+    free(less_gles)
+    free(less_gres)
+    for i in range(ngreater):
+        free(greater_gles[i])
+        free(greater_gres[i])
+    free(g_ids)
+    free(greater_index)
+    free(greater_gles)
+    free(greater_gres)
+
+
+    return 0
+
+cdef geo_split(Node node, 
+               np.float64_t *gle, 
+               np.float64_t *gre, 
+               int grid_id, 
+               int rank, 
+               int size):
+    cdef int big_dim = 0
+    cdef int i
+    cdef np.float64_t v, my_max = 0.0
+    
+    for i in range(3):
+        v = gre[i] - gle[i]
+        if v > my_max:
+            my_max = v
+            big_dim = i
+
+    new_pos = (gre[big_dim] + gle[big_dim])/2.
+    
+    lnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    lnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gle = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+    rnew_gre = <np.float64_t *> alloca(3 * sizeof(np.float64_t))
+
+    for j in range(3):
+        lnew_gle[j] = gle[j]
+        lnew_gre[j] = gre[j]
+        rnew_gle[j] = gle[j]
+        rnew_gre[j] = gre[j]
+
+    split = <Split *> malloc(sizeof(Split))
+    split.dim = big_dim 
+    split.pos = new_pos
+
+    # Create a Split
+    divide(node, split)
+
+    #lnew_gre[big_dim] = new_pos
+    # Populate Left Node
+    #print 'Inserting left node', node.left_edge, node.right_edge
+    insert_grid(node.left, lnew_gle, lnew_gre, 
+            grid_id, rank, size)
+
+    #rnew_gle[big_dim] = new_pos 
+    # Populate Right Node
+    #print 'Inserting right node', node.left_edge, node.right_edge
+    insert_grid(node.right, rnew_gle, rnew_gre,
+            grid_id, rank, size)
+    return
+
+cdef void divide(Node node, Split * split):
+    # Create a Split
+    node.split = split
+    
+    cdef np.ndarray[np.float64_t, ndim=1] le = np.zeros(3, dtype='float64')
+    cdef np.ndarray[np.float64_t, ndim=1] re = np.zeros(3, dtype='float64')
+
+    cdef int i
+    for i in range(3):
+        le[i] = node.left_edge[i]
+        re[i] = node.right_edge[i]
+    re[split.dim] = split.pos
+
+    node.left = Node(node, None, None,
+                     le, re, node.grid,
+                     _lchild_id(node.node_id))
+
+    re[split.dim] = node.right_edge[split.dim]
+    le[split.dim] = split.pos
+    node.right = Node(node, None, None,
+                      le, re, node.grid,
+                      _rchild_id(node.node_id))
+
+    return
+# 
+def kd_sum_volume(Node node):
+    cdef np.float64_t vol = 1.0
+    if (node.left is None) and (node.right is None):
+        if node.grid == -1:
+            return 0.0
+        for i in range(3):
+            vol *= node.right_edge[i] - node.left_edge[i]
+        return vol 
+    else:
+        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
+
+def kd_node_check(Node node):
+    assert (node.left is None) == (node.right is None)
+    if (node.left is None) and (node.right is None):
+        if node.grid != -1:
+            return np.prod(node.right_edge - node.left_edge)
+        else: return 0.0
+    else:
+        return kd_node_check(node.left)+kd_node_check(node.right)
+
+def kd_is_leaf(Node node):
+    cdef int has_l_child = node.left is None
+    cdef int has_r_child = node.right is None
+    assert has_l_child == has_r_child
+    return has_l_child
+
+def step_depth(Node current, Node previous):
+    '''
+    Takes a single step in the depth-first traversal
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down, go left first
+        previous = current
+        if current.left is not None:
+            current = current.left
+        elif current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left, go right 
+        previous = current
+        if current.right is not None:
+            current = current.right
+        else:
+            current = current.parent
+
+    elif current.right is previous: # Moving up from right child, move up
+        previous = current
+        current = current.parent
+
+    return current, previous
+ 
+def depth_traverse(Node trunk, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = trunk
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def depth_first_touch(Node tree, max_node=None):
+    '''
+    Yields a depth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree
+    previous = None
+    if max_node is None:
+        max_node = np.inf
+    while current is not None:
+        if previous is None or previous.parent != current:
+            yield current
+        current, previous = step_depth(current, previous)
+        if current is None: break
+        if current.node_id >= max_node:
+            current = current.parent
+            previous = current.right
+
+def breadth_traverse(Node tree):
+    '''
+    Yields a breadth-first traversal of the kd tree always going to
+    the left child before the right.
+    '''
+    current = tree
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_depth(current, previous)
+
+
+def viewpoint_traverse(Node tree, viewpoint):
+    '''
+    Yields a viewpoint dependent traversal of the kd-tree.  Starts
+    with nodes furthest away from viewpoint.
+    '''
+
+    current = tree
+    previous = None
+    while current is not None:
+        yield current
+        current, previous = step_viewpoint(current, previous, viewpoint)
+
+def step_viewpoint(Node current, 
+                   Node previous, 
+                   viewpoint):
+    '''
+    Takes a single step in the viewpoint based traversal.  Always
+    goes to the node furthest away from viewpoint first.
+    '''
+    if kd_is_leaf(current): # At a leaf, move back up
+        previous = current
+        current = current.parent
+    elif current.split.dim is None: # This is a dead node
+        previous = current
+        current = current.parent
+
+    elif current.parent is previous: # Moving down
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                previous = current.right
+        else:
+            if current.left is not None:
+                current = current.left
+            else:
+                previous = current.left
+
+    elif current.right is previous: # Moving up from right 
+        previous = current
+        if viewpoint[current.split.dim] <= current.split.pos:
+            if current.left is not None:
+                current = current.left
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    elif current.left is previous: # Moving up from left child
+        previous = current
+        if viewpoint[current.split.dim] > current.split.pos:
+            if current.right is not None:
+                current = current.right
+            else:
+                current = current.parent
+        else:
+            current = current.parent
+
+    return current, previous
+
+cdef int point_in_node(Node node, 
+                       np.ndarray[np.float64_t, ndim=1] point):
+    cdef int i
+    cdef int inside = 1
+    for i in range(3):
+        inside *= node.left_edge[i] <= point[i]
+        inside *= node.right_edge[i] > point[i]
+    return inside
+
+
+def find_node(Node node,
+              np.ndarray[np.float64_t, ndim=1] point):
+    """
+    Find the AMRKDTree node enclosing a position
+    """
+    assert(point_in_node(node, point))
+    while not kd_is_leaf(node):
+        if point[node.split.dim] < node.split.pos:
+            node = node.left
+        else:
+            node = node.right
+    return node
+
+

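For orientation, a minimal sketch of how these generators can be driven once
amr_kdtools is compiled.  It assumes an AMRKDTree instance named kd has been
built elsewhere (as in the test below, which walks kd.tree.trunk), and
process/composite are hypothetical per-node callbacks:

    import numpy as np
    from yt.utilities.lib.amr_kdtools import \
        depth_traverse, viewpoint_traverse, find_node

    # Depth-first, left child before right.
    for node in depth_traverse(kd.tree.trunk):
        process(node)                    # hypothetical callback

    # Viewpoint-dependent ordering for compositing: nodes furthest
    # from the camera position are yielded first.
    view = np.array([0.5, 0.5, 2.0])
    for node in viewpoint_traverse(kd.tree.trunk, view):
        composite(node)                  # hypothetical callback

    # Locate the leaf enclosing a point (asserts the point is inside).
    leaf = find_node(kd.tree.trunk, np.array([0.25, 0.25, 0.25]))
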
diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -249,6 +249,9 @@
     config.add_extension("GridTree", 
     ["yt/utilities/lib/GridTree.pyx"],
         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
+    config.add_extension("amr_kdtools", 
+                         ["yt/utilities/lib/amr_kdtools.pyx"],
+                         libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_subpackage("tests")
 
     if os.environ.get("GPERFTOOLS", "no").upper() != "NO":

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -24,7 +24,8 @@
 """
 
 from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.amr_kdtree.amr_kdtools import depth_traverse
+from yt.utilities.lib.amr_kdtools import depth_traverse, \
+        get_left_edge, get_right_edge
 import yt.utilities.initial_conditions as ic
 import yt.utilities.flagging_methods as fm
 from yt.frontends.stream.api import load_uniform_grid, refine_amr
@@ -53,17 +54,19 @@
 
     # This largely reproduces the AMRKDTree.tree.check_tree() functionality
     tree_ok = True
-    for node in depth_traverse(kd.tree):
+    for node in depth_traverse(kd.tree.trunk):
         if node.grid is None:
             continue
         grid = pf.h.grids[node.grid - kd._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        tree_ok *= np.all(grid.LeftEdge <= node.left_edge)
-        tree_ok *= np.all(grid.RightEdge >= node.right_edge)
+        tree_ok *= np.all(grid.LeftEdge <= nle)
+        tree_ok *= np.all(grid.RightEdge >= nre)
         tree_ok *= np.all(dims > 0)
 
     yield assert_equal, True, tree_ok

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -169,8 +169,8 @@
                    np.any(stream[-step+1,:] >= self.pf.domain_right_edge):
                 return 0
 
-            if np.any(stream[-step+1,:] < node.left_edge) | \
-                   np.any(stream[-step+1,:] >= node.right_edge):
+            if np.any(stream[-step+1,:] < node.get_left_edge()) | \
+                   np.any(stream[-step+1,:] >= node.get_right_edge()):
                 return step-1
             step -= 1
         return step

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r a5f16d327d7e784257019a85bad89437f85ef74d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -1130,19 +1130,22 @@
                     if np.any(np.isnan(data)):
                         raise RuntimeError
 
-        view_pos = self.front_center
         for brick in self.volume.traverse(self.front_center):
             sampler(brick, num_threads=num_threads)
             total_cells += np.prod(brick.my_data[0].shape)
             pbar.update(total_cells)
 
         pbar.finish()
-        image = sampler.aimage
-        self.finalize_image(image)
+        image = self.finalize_image(sampler.aimage)
         return image
 
     def finalize_image(self, image):
+        view_pos = self.front_center
         image.shape = self.resolution[0], self.resolution[0], 4
+        image = self.volume.reduce_tree_images(image, view_pos)
+        if self.transfer_function.grey_opacity is False:
+            image[:,:,3]=1.0
+        return image
 
 def corners(left_edge, right_edge):
     return np.array([


https://bitbucket.org/yt_analysis/yt-3.0/commits/41f2f18daf7c/
Changeset:   41f2f18daf7c
Branch:      yt
User:        MatthewTurk
Date:        2013-06-29 21:26:17
Summary:     This fixes the FLASH current redshift, which is not up to date until the second
timestep.
Affected #:  1 file

diff -r 8e8bd5b353df99c1510058a7c982329e5212fcfe -r 41f2f18daf7ccef2a83ca7390c8fe1e3b9288b46 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -458,7 +458,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = self.parameters['redshift']
+            self.current_redshift = 1.0/(self.parameters['scalefactor'] - 1.0)
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']


https://bitbucket.org/yt_analysis/yt-3.0/commits/d26d6767486f/
Changeset:   d26d6767486f
Branch:      yt
User:        MatthewTurk
Date:        2013-06-29 21:38:37
Summary:     Oops, the parentheses were in the wrong place!
Affected #:  1 file

diff -r 41f2f18daf7ccef2a83ca7390c8fe1e3b9288b46 -r d26d6767486fe30d79f741538b79a8db6835205f yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -458,7 +458,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = 1.0/(self.parameters['scalefactor'] - 1.0)
+            self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']

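To see why the placement matters: with the usual convention a = 1/(1+z), the
redshift follows as z = 1/a - 1, so a scale factor of 0.5 corresponds to
z = 1.  A quick sanity check (values invented for illustration):

    a = 0.5                  # scale factor
    print(1.0/a - 1.0)       # 1.0  -- correct
    print(1.0/(a - 1.0))     # -2.0 -- the misplaced parentheses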

https://bitbucket.org/yt_analysis/yt-3.0/commits/74891da30e86/
Changeset:   74891da30e86
Branch:      yt
User:        MatthewTurk
Date:        2013-07-01 16:01:36
Summary:     Merged in MatthewTurk/yt (pull request #537)

This fixes FLASH current redshift
Affected #:  1 file

diff -r a5f16d327d7e784257019a85bad89437f85ef74d -r 74891da30e86ad33ecad7614e1e10e832ff9bab2 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -458,7 +458,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = self.parameters['redshift']
+            self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']


https://bitbucket.org/yt_analysis/yt-3.0/commits/7be3be563bc3/
Changeset:   7be3be563bc3
Branch:      yt
User:        xarthisius
Date:        2013-06-22 11:12:31
Summary:     Search for hdf5 library/headers in default paths on posix systems. Fixes #597
Affected #:  1 file

diff -r 3a638f82a37eb6a647ca4cda18446d94a055f221 -r 7be3be563bc3d0d2d94b3c1e0956649a3bbe6969 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,6 +1,64 @@
 #!/usr/bin/env python
 import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
+
+
+# snatched from PyTables
+def add_from_path(envname, dirs):
+    try:
+        dirs.extend(os.environ[envname].split(os.pathsep))
+    except KeyError:
+        pass
+
+
+# snatched from PyTables
+def add_from_flags(envname, flag_key, dirs):
+    for flag in os.environ.get(envname, "").split():
+        if flag.startswith(flag_key):
+            dirs.append(flag[len(flag_key):])
+
+
+# snatched from PyTables
+def get_default_dirs():
+    default_header_dirs = []
+    add_from_path("CPATH", default_header_dirs)
+    add_from_path("C_INCLUDE_PATH", default_header_dirs)
+    add_from_flags("CPPFLAGS", "-I", default_header_dirs)
+    default_header_dirs.extend(['/usr/include', '/usr/local/include'])
+
+    default_library_dirs = []
+    add_from_flags("LDFLAGS", "-L", default_library_dirs)
+    default_library_dirs.extend(
+        os.path.join(_tree, _arch)
+        for _tree in ('/', '/usr', '/usr/local')
+        for _arch in ('lib64', 'lib')
+    )
+    return default_header_dirs, default_library_dirs
+
+
+def get_hdf5_include(header_dirs):
+    for inc_prefix in header_dirs:
+        if os.path.isfile(os.path.join(inc_prefix, "hdf5.h")):
+            return inc_prefix
+    return None
+
+
+def get_hdf5_lib(lib_dirs):
+    import ctypes
+    hdf5_libfile = ctypes.util.find_library("hdf5")
+    if os.path.isfile(hdf5_libfile):
+        return os.path.dirname(hdf5_libfile)
+    for lib_dir in lib_dirs:
+        try:
+            ctypes.CDLL(os.path.join(lib_dir, hdf5_libfile))
+            return lib_dir
+        except OSError:
+            pass
+    return None
+
 
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
@@ -17,7 +75,16 @@
         hdf5_lib = os.path.join(hdf5_dir, "lib")
         print "HDF5_LOCATION: hdf5.cfg: %s, %s" % (hdf5_inc, hdf5_lib)
         return (hdf5_inc, hdf5_lib)
-    # Now we see if ctypes can help us:
+    if os.name == 'posix':
+        default_header_dirs, default_library_dirs = get_default_dirs()
+        hdf5_inc = get_hdf5_include(default_header_dirs)
+        hdf5_lib = get_hdf5_lib(default_library_dirs)
+        if None not in (hdf5_inc, hdf5_lib):
+            print(
+                "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc, hdf5_lib))
+            return (hdf5_inc, hdf5_lib)
+
+    # Now we see if ctypes can help us on non posix platform
     try:
         import ctypes.util
         hdf5_libfile = ctypes.util.find_library("hdf5")
@@ -31,8 +98,8 @@
                 hdf5_inc = os.path.join(hdf5_dir, "include")
                 hdf5_lib = os.path.join(hdf5_dir, "lib")
                 print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
-                    hdf5_lib)
-                return hdf5_inc, hdf5_lib
+                                                                hdf5_lib)
+                return (hdf5_inc, hdf5_lib)
     except ImportError:
         pass
     print "Reading HDF5 location from hdf5.cfg failed."
@@ -55,22 +122,23 @@
     config.add_subpackage("parallel_tools")
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
-                "yt/utilities/data_point_utilities.c", libraries=["m"])
+                         "yt/utilities/data_point_utilities.c",
+                         libraries=["m"])
     config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]
     config.add_extension("hdf5_light_reader",
-                        "yt/utilities/hdf5_light_reader.c",
+                         "yt/utilities/hdf5_light_reader.c",
                          define_macros=[("H5_USE_16_API", True)],
                          libraries=["m", "hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
     config.add_extension("libconfig_wrapper",
-        ["yt/utilities/libconfig_wrapper.pyx"] +
-         glob.glob("yt/utilities/_libconfig/*.c"),
-        include_dirs=["yt/utilities/_libconfig/"],
-        define_macros=[("HAVE_XLOCALE_H", True)]
-        )
+                         ["yt/utilities/libconfig_wrapper.pyx"] +
+                         glob.glob("yt/utilities/_libconfig/*.c"),
+                         include_dirs=["yt/utilities/_libconfig/"],
+                         define_macros=[("HAVE_XLOCALE_H", True)]
+                         )
     config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     return config

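As a rough illustration of what the PyTables-derived helpers above do, here
is the flag parsing in isolation; the environment values are invented for
the example:

    import os

    os.environ["CPPFLAGS"] = "-I/opt/hdf5/include -O2"
    os.environ["LDFLAGS"] = "-L/opt/hdf5/lib"

    header_dirs, library_dirs = [], []
    for flag in os.environ.get("CPPFLAGS", "").split():
        if flag.startswith("-I"):
            header_dirs.append(flag[len("-I"):])
    for flag in os.environ.get("LDFLAGS", "").split():
        if flag.startswith("-L"):
            library_dirs.append(flag[len("-L"):])

    print(header_dirs)   # ['/opt/hdf5/include']
    print(library_dirs)  # ['/opt/hdf5/lib']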

https://bitbucket.org/yt_analysis/yt-3.0/commits/19b30715ff0e/
Changeset:   19b30715ff0e
Branch:      yt
User:        xarthisius
Date:        2013-06-23 13:40:41
Summary:     Refactor the code responsible for detecting lib/include paths from an env var, a cfg file, and via ctypes, respectively
Affected #:  2 files

diff -r 7be3be563bc3d0d2d94b3c1e0956649a3bbe6969 -r 19b30715ff0e758fddbcb8ee077279d2497918c4 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,24 +1,27 @@
 #!/usr/bin/env python
 import setuptools
 import os, sys, os.path, glob, \
-  tempfile, subprocess, shutil
+    tempfile, subprocess, shutil
+from yt.utilities.setup import \
+    get_location_from_env, get_location_from_cfg, get_location_from_ctypes
 
 def check_for_png():
     # First up: HDF5_DIR in environment
     if "PNG_DIR" in os.environ:
-        png_dir = os.environ["PNG_DIR"]
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
+        return get_location_from_env("PNG_DIR")
     # Next up, we try png.cfg
     elif os.path.exists("png.cfg"):
-        png_dir = open("png.cfg").read().strip()
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
+        return get_location_from_cfg("png.cfg")
+    if os.name == 'posix':
+        png_inc, png_lib = get_location_from_ctypes("png.h", "png")
+    if None not in (png_inc, png_lib):
+        print(
+            "PNG_LOCATION: PNG found via ctypes in: %s, %s" \
+                % (png_inc, png_lib)
+        )
         return (png_inc, png_lib)
-    # Now we see if ctypes can help us:
+
+    # Now we see if ctypes can help us on non posix platform
     try:
         import ctypes.util
         png_libfile = ctypes.util.find_library("png")
@@ -53,19 +56,20 @@
 def check_for_freetype():
     # First up: environment
     if "FTYPE_DIR" in os.environ:
-        freetype_dir = os.environ["FTYPE_DIR"]
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
+        return get_location_from_env("FTYPE_DIR")
     # Next up, we try freetype.cfg
     elif os.path.exists("freetype.cfg"):
-        freetype_dir = open("freetype.cfg").read().strip()
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
+        return get_location_from_cfg("freetype.cfg")
+    if os.name == 'posix':
+        freetype_inc, freetype_lib = \
+                get_location_from_ctypes("ft2build.h", "freetype")
+    if None not in (freetype_inc, freetype_lib):
+        print(
+            "FTYPE_LOCATION: freetype found via ctypes in: %s, %s" \
+                % (freetype_inc, freetype_lib)
+        )
         return (freetype_inc, freetype_lib)
-    # Now we see if ctypes can help us:
+    # Now we see if ctypes can help us on non posix platform
     try:
         import ctypes.util
         freetype_libfile = ctypes.util.find_library("freetype")
@@ -122,7 +126,7 @@
     with open(os.devnull, 'w') as fnull:
         exit_code = subprocess.call([compiler, '-fopenmp', filename],
                                     stdout=fnull, stderr=fnull)
-        
+
     # Clean up
     file.close()
     os.chdir(curdir)

diff -r 7be3be563bc3d0d2d94b3c1e0956649a3bbe6969 -r 19b30715ff0e758fddbcb8ee077279d2497918c4 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -39,50 +39,64 @@
     return default_header_dirs, default_library_dirs
 
 
-def get_hdf5_include(header_dirs):
-    for inc_prefix in header_dirs:
-        if os.path.isfile(os.path.join(inc_prefix, "hdf5.h")):
-            return inc_prefix
-    return None
+def get_location_from_env(env):
+    env_dir = os.environ[env]
+    env_inc = os.path.join(env_dir, "include")
+    env_lib = os.path.join(env_dir, "lib")
+    print "%s_LOCATION: %s: %s, %s" \
+        % (env.split('_')[0], env, env_inc, env_lib)
+    return (env_inc, env_lib)
 
 
-def get_hdf5_lib(lib_dirs):
-    import ctypes
-    hdf5_libfile = ctypes.util.find_library("hdf5")
-    if os.path.isfile(hdf5_libfile):
-        return os.path.dirname(hdf5_libfile)
-    for lib_dir in lib_dirs:
+def get_location_from_cfg(cfg):
+    cfg_dir = open(cfg).read().strip()
+    cfg_inc = os.path.join(cfg_dir, "include")
+    cfg_lib = os.path.join(cfg_dir, "lib")
+    print "%s_LOCATION: %s: %s, %s" \
+        % (cfg.split('.')[0].upper(), cfg, cfg_inc, cfg_lib)
+    return (cfg_inc, cfg_lib)
+
+
+def get_location_from_ctypes(header, library):
+    try:
+        import ctypes
+        import ctypes.util
+    except ImportError:
+        return (None, None)
+
+    target_libfile = ctypes.util.find_library(library)
+    default_header_dirs, default_library_dirs = get_default_dirs()
+    target_inc, target_libdir = None, None
+    for inc_prefix in default_header_dirs:
+        if os.path.isfile(os.path.join(inc_prefix, header)):
+            target_inc = inc_prefix
+
+    if os.path.isfile(target_libfile):
+        return os.path.dirname(target_libfile)
+    for lib_dir in default_library_dirs:
         try:
-            ctypes.CDLL(os.path.join(lib_dir, hdf5_libfile))
-            return lib_dir
+            ctypes.CDLL(os.path.join(lib_dir, target_libfile))
+            target_libdir = lib_dir
         except OSError:
             pass
-    return None
+    return (target_inc, target_libdir)
 
 
 def check_for_hdf5():
     # First up: HDF5_DIR in environment
     if "HDF5_DIR" in os.environ:
-        hdf5_dir = os.environ["HDF5_DIR"]
-        hdf5_inc = os.path.join(hdf5_dir, "include")
-        hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: HDF5_DIR: %s, %s" % (hdf5_inc, hdf5_lib)
-        return (hdf5_inc, hdf5_lib)
+        return get_location_from_env("HDF5_DIR")
     # Next up, we try hdf5.cfg
     elif os.path.exists("hdf5.cfg"):
-        hdf5_dir = open("hdf5.cfg").read().strip()
-        hdf5_inc = os.path.join(hdf5_dir, "include")
-        hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: hdf5.cfg: %s, %s" % (hdf5_inc, hdf5_lib)
+        return get_location_from_cfg("hdf5.cfg")
+    if os.name == 'posix':
+        hdf5_inc, hdf5_lib = get_location_from_ctypes("hdf5.h", "hdf5")
+    if None not in (hdf5_inc, hdf5_lib):
+        print(
+            "HDF5_LOCATION: HDF5 found via ctypes in: %s, %s"
+            % (hdf5_inc, hdf5_lib)
+        )
         return (hdf5_inc, hdf5_lib)
-    if os.name == 'posix':
-        default_header_dirs, default_library_dirs = get_default_dirs()
-        hdf5_inc = get_hdf5_include(default_header_dirs)
-        hdf5_lib = get_hdf5_lib(default_library_dirs)
-        if None not in (hdf5_inc, hdf5_lib):
-            print(
-                "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc, hdf5_lib))
-            return (hdf5_inc, hdf5_lib)
 
     # Now we see if ctypes can help us on non posix platform
     try:

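The ctypes probing in get_location_from_ctypes boils down to two steps: ask
ctypes.util.find_library for the library's file name, then try to dlopen it
from each candidate directory.  A standalone sketch of the same idea (with a
shortened directory list, a None guard, and an early break that the code
above does not yet have):

    import os
    import ctypes
    import ctypes.util

    libfile = ctypes.util.find_library("hdf5")  # e.g. 'libhdf5.so' or None
    libdir = None
    if libfile is not None:
        for candidate in ("/usr/lib64", "/usr/lib", "/usr/local/lib"):
            try:
                ctypes.CDLL(os.path.join(candidate, libfile))
                libdir = candidate
                break
            except OSError:
                pass
    print(libdir)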

https://bitbucket.org/yt_analysis/yt-3.0/commits/39b9d0d919db/
Changeset:   39b9d0d919db
Branch:      yt
User:        xarthisius
Date:        2013-06-23 13:48:21
Summary:     Add /usr/X11 to default search paths, remove duplicated code
Affected #:  2 files

diff -r 19b30715ff0e758fddbcb8ee077279d2497918c4 -r 39b9d0d919db0d9891e701daabda700f75114495 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -12,6 +12,7 @@
     # Next up, we try png.cfg
     elif os.path.exists("png.cfg"):
         return get_location_from_cfg("png.cfg")
+    # Now we see if ctypes can help us
     if os.name == 'posix':
         png_inc, png_lib = get_location_from_ctypes("png.h", "png")
     if None not in (png_inc, png_lib):
@@ -21,33 +22,6 @@
         )
         return (png_inc, png_lib)
 
-    # Now we see if ctypes can help us on non posix platform
-    try:
-        import ctypes.util
-        png_libfile = ctypes.util.find_library("png")
-        if png_libfile is not None and os.path.isfile(png_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            png_dir = os.path.dirname(os.path.dirname(png_libfile))
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
     print "Reading png location from png.cfg failed."
     print "Please place the base directory of your png install in png.cfg and restart."
     print "(ex: \"echo '/usr/local/' > png.cfg\" )"
@@ -60,6 +34,7 @@
     # Next up, we try freetype.cfg
     elif os.path.exists("freetype.cfg"):
         return get_location_from_cfg("freetype.cfg")
+    # Now we see if ctypes can help us
     if os.name == 'posix':
         freetype_inc, freetype_lib = \
                 get_location_from_ctypes("ft2build.h", "freetype")
@@ -69,33 +44,7 @@
                 % (freetype_inc, freetype_lib)
         )
         return (freetype_inc, freetype_lib)
-    # Now we see if ctypes can help us on non posix platform
-    try:
-        import ctypes.util
-        freetype_libfile = ctypes.util.find_library("freetype")
-        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
+
     print "Reading freetype location from freetype.cfg failed."
     print "Please place the base directory of your freetype install in freetype.cfg and restart."
     print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"

diff -r 19b30715ff0e758fddbcb8ee077279d2497918c4 -r 39b9d0d919db0d9891e701daabda700f75114495 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -27,13 +27,15 @@
     add_from_path("CPATH", default_header_dirs)
     add_from_path("C_INCLUDE_PATH", default_header_dirs)
     add_from_flags("CPPFLAGS", "-I", default_header_dirs)
-    default_header_dirs.extend(['/usr/include', '/usr/local/include'])
+    default_header_dirs.extend(
+        ['/usr/include', '/usr/local/include', '/usr/X11']
+    )
 
     default_library_dirs = []
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
         os.path.join(_tree, _arch)
-        for _tree in ('/', '/usr', '/usr/local')
+        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
         for _arch in ('lib64', 'lib')
     )
     return default_header_dirs, default_library_dirs
@@ -89,6 +91,7 @@
     # Next up, we try hdf5.cfg
     elif os.path.exists("hdf5.cfg"):
         return get_location_from_cfg("hdf5.cfg")
+    # Now we see if ctypes can help us
     if os.name == 'posix':
         hdf5_inc, hdf5_lib = get_location_from_ctypes("hdf5.h", "hdf5")
     if None not in (hdf5_inc, hdf5_lib):
@@ -98,24 +101,6 @@
         )
         return (hdf5_inc, hdf5_lib)
 
-    # Now we see if ctypes can help us on non posix platform
-    try:
-        import ctypes.util
-        hdf5_libfile = ctypes.util.find_library("hdf5")
-        if hdf5_libfile is not None and os.path.isfile(hdf5_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            hdf5_dir = os.path.dirname(os.path.dirname(hdf5_libfile))
-            if os.path.isdir(os.path.join(hdf5_dir, "include")) and \
-               os.path.isfile(os.path.join(hdf5_dir, "include", "hdf5.h")):
-                hdf5_inc = os.path.join(hdf5_dir, "include")
-                hdf5_lib = os.path.join(hdf5_dir, "lib")
-                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
-                                                                hdf5_lib)
-                return (hdf5_inc, hdf5_lib)
-    except ImportError:
-        pass
     print "Reading HDF5 location from hdf5.cfg failed."
     print "Please place the base directory of your"
     print "HDF5 install in hdf5.cfg and restart."


https://bitbucket.org/yt_analysis/yt-3.0/commits/7bdf4e01e6cf/
Changeset:   7bdf4e01e6cf
Branch:      yt
User:        xarthisius
Date:        2013-06-23 13:59:18
Summary:     Further refactoring of the code that checks for dependencies
Affected #:  2 files

diff -r 39b9d0d919db0d9891e701daabda700f75114495 -r 7bdf4e01e6cfd57df79ebfb7ee7e3b3eb67a294c yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -3,53 +3,18 @@
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
 from yt.utilities.setup import \
-    get_location_from_env, get_location_from_cfg, get_location_from_ctypes
+    check_for_dependencies
+
 
 def check_for_png():
-    # First up: HDF5_DIR in environment
-    if "PNG_DIR" in os.environ:
-        return get_location_from_env("PNG_DIR")
-    # Next up, we try png.cfg
-    elif os.path.exists("png.cfg"):
-        return get_location_from_cfg("png.cfg")
-    # Now we see if ctypes can help us
-    if os.name == 'posix':
-        png_inc, png_lib = get_location_from_ctypes("png.h", "png")
-    if None not in (png_inc, png_lib):
-        print(
-            "PNG_LOCATION: PNG found via ctypes in: %s, %s" \
-                % (png_inc, png_lib)
-        )
-        return (png_inc, png_lib)
+    return check_for_dependencies("PNG_DIR", "png.cfg", "png.h", "png")
 
-    print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
-    sys.exit(1)
 
 def check_for_freetype():
-    # First up: environment
-    if "FTYPE_DIR" in os.environ:
-        return get_location_from_env("FTYPE_DIR")
-    # Next up, we try freetype.cfg
-    elif os.path.exists("freetype.cfg"):
-        return get_location_from_cfg("freetype.cfg")
-    # Now we see if ctypes can help us
-    if os.name == 'posix':
-        freetype_inc, freetype_lib = \
-                get_location_from_ctypes("ft2build.h", "freetype")
-    if None not in (freetype_inc, freetype_lib):
-        print(
-            "FTYPE_LOCATION: freetype found via ctypes in: %s, %s" \
-                % (freetype_inc, freetype_lib)
-        )
-        return (freetype_inc, freetype_lib)
+    return check_for_dependencies(
+        "FTYPE_DIR", "freetype.cfg", "ft2build.h", "freetype"
+    )
 
-    print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
-    print "You can locate this by looking for the file ft2build.h"
-    sys.exit(1)
 
 def check_for_openmp():
     # Create a temporary directory

diff -r 39b9d0d919db0d9891e701daabda700f75114495 -r 7bdf4e01e6cfd57df79ebfb7ee7e3b3eb67a294c yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -84,30 +84,34 @@
     return (target_inc, target_libdir)
 
 
-def check_for_hdf5():
-    # First up: HDF5_DIR in environment
-    if "HDF5_DIR" in os.environ:
-        return get_location_from_env("HDF5_DIR")
-    # Next up, we try hdf5.cfg
-    elif os.path.exists("hdf5.cfg"):
-        return get_location_from_cfg("hdf5.cfg")
+def check_for_dependencies(env, cfg, header, library):
+    # First up: check in environment
+    if env in os.environ:
+        return get_location_from_env(env)
+    # Next up, we try config file
+    elif os.path.exists(cfg):
+        return get_location_from_cfg(cfg)
     # Now we see if ctypes can help us
     if os.name == 'posix':
-        hdf5_inc, hdf5_lib = get_location_from_ctypes("hdf5.h", "hdf5")
-    if None not in (hdf5_inc, hdf5_lib):
+        target_inc, target_lib = get_location_from_ctypes(header, library)
+    if None not in (target_inc, target_lib):
         print(
-            "HDF5_LOCATION: HDF5 found via ctypes in: %s, %s"
-            % (hdf5_inc, hdf5_lib)
+            "%s_LOCATION: %s found via ctypes in: %s, %s"
+            % (env.split('_')[0], env.split('_')[0], target_inc, target_lib)
         )
-        return (hdf5_inc, hdf5_lib)
+        return (target_inc, target_lib)
 
-    print "Reading HDF5 location from hdf5.cfg failed."
-    print "Please place the base directory of your"
-    print "HDF5 install in hdf5.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > hdf5.cfg\" )"
+    print("Reading %s location from %s failed." % (env.split('_')[0], cfg))
+    print("Please place the base directory of your")
+    print("%s install in %s and restart." % (env.split('_')[0], cfg))
+    print("(ex: \"echo '/usr/local/' > %s\" )" % cfg)
+    print("You can locate the path by looking for %s" % header)
     sys.exit(1)
 
 
+def check_for_hdf5():
+    return check_for_dependencies("HDF5_DIR", "hdf5.cfg", "hdf5.h", "hdf5")
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('utilities', parent_package, top_path)

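After this refactor every third-party dependency goes through one cascade:
explicit environment variable, then a cfg file, then ctypes-based discovery,
and finally a hard exit with instructions.  Adding a new dependency reduces
to a one-liner; for instance (hypothetical library, purely for illustration):

    def check_for_zlib():
        # Same pattern as check_for_hdf5/check_for_png above.
        return check_for_dependencies("ZLIB_DIR", "zlib.cfg", "zlib.h", "z")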

https://bitbucket.org/yt_analysis/yt-3.0/commits/b66b8940338f/
Changeset:   b66b8940338f
Branch:      yt
User:        xarthisius
Date:        2013-06-25 10:28:34
Summary:     Return a proper tuple of header and libdir if libfile is found directly via ctypes.util. Thanks Nathan!
Affected #:  1 file

diff -r 7bdf4e01e6cfd57df79ebfb7ee7e3b3eb67a294c -r b66b8940338fe8d36a768668983fe007fff0241c yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -74,7 +74,7 @@
             target_inc = inc_prefix
 
     if os.path.isfile(target_libfile):
-        return os.path.dirname(target_libfile)
+        return (target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:
             ctypes.CDLL(os.path.join(lib_dir, target_libfile))


https://bitbucket.org/yt_analysis/yt-3.0/commits/6ac34027d89b/
Changeset:   6ac34027d89b
Branch:      yt
User:        xarthisius
Date:        2013-06-27 07:57:05
Summary:     Check that target_libfile is not None before invoking isfile on it. Thanks to Nathan for catching this. Remove the unused setuptools import.
Affected #:  1 file

diff -r b66b8940338fe8d36a768668983fe007fff0241c -r 6ac34027d89b1dad038ae127066206df4ff4a0b0 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-import setuptools
 import os
 import sys
 import os.path
@@ -66,14 +65,14 @@
     except ImportError:
         return (None, None)
 
-    target_libfile = ctypes.util.find_library(library)
     default_header_dirs, default_library_dirs = get_default_dirs()
     target_inc, target_libdir = None, None
     for inc_prefix in default_header_dirs:
         if os.path.isfile(os.path.join(inc_prefix, header)):
             target_inc = inc_prefix
 
-    if os.path.isfile(target_libfile):
+    target_libfile = ctypes.util.find_library(library)
+    if target_libfile is not None and os.path.isfile(target_libfile):
         return (target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:


https://bitbucket.org/yt_analysis/yt-3.0/commits/ac8b89a5f6ed/
Changeset:   ac8b89a5f6ed
Branch:      yt
User:        xarthisius
Date:        2013-06-30 11:49:11
Summary:     Sanitize get_location_from_ctypes
Affected #:  1 file

diff -r 6ac34027d89b1dad038ae127066206df4ff4a0b0 -r ac8b89a5f6ed1199a94efef0c4915430bbd5dffb yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -44,8 +44,8 @@
     env_dir = os.environ[env]
     env_inc = os.path.join(env_dir, "include")
     env_lib = os.path.join(env_dir, "lib")
-    print "%s_LOCATION: %s: %s, %s" \
-        % (env.split('_')[0], env, env_inc, env_lib)
+    print("%s_LOCATION: %s: %s, %s"
+          % (env.split('_')[0], env, env_inc, env_lib))
     return (env_inc, env_lib)
 
 
@@ -53,11 +53,21 @@
     cfg_dir = open(cfg).read().strip()
     cfg_inc = os.path.join(cfg_dir, "include")
     cfg_lib = os.path.join(cfg_dir, "lib")
-    print "%s_LOCATION: %s: %s, %s" \
-        % (cfg.split('.')[0].upper(), cfg, cfg_inc, cfg_lib)
+    print("%s_LOCATION: %s: %s, %s"
+          % (cfg.split('.')[0].upper(), cfg, cfg_inc, cfg_lib))
     return (cfg_inc, cfg_lib)
 
 
+def check_prefix(inc_dir, lib_dir):
+    prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
+    if prefix is not '' and prefix == os.path.dirname(inc_dir):
+        return (inc_dir, lib_dir)
+    else:
+        print("It seems that include prefix is different from lib prefix")
+        print("Please use either env variable or cfg to set proper path")
+        return (None, None)
+
+
 def get_location_from_ctypes(header, library):
     try:
         import ctypes
@@ -73,14 +83,14 @@
 
     target_libfile = ctypes.util.find_library(library)
     if target_libfile is not None and os.path.isfile(target_libfile):
-        return (target_inc, os.path.dirname(target_libfile))
+        return check_prefix(target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:
             ctypes.CDLL(os.path.join(lib_dir, target_libfile))
             target_libdir = lib_dir
         except OSError:
             pass
-    return (target_inc, target_libdir)
+    return check_prefix(target_inc, target_libdir)
 
 
 def check_for_dependencies(env, cfg, header, library):
@@ -111,6 +121,7 @@
 def check_for_hdf5():
     return check_for_dependencies("HDF5_DIR", "hdf5.cfg", "hdf5.h", "hdf5")
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('utilities', parent_package, top_path)

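The new check_prefix guard accepts an (include, lib) pair only when both
directories share the same install prefix.  Roughly, with invented paths:

    import os.path

    inc, lib = "/usr/local/include", "/usr/local/lib"
    prefix = os.path.commonprefix([inc, lib]).rstrip('/\\')
    print(prefix == os.path.dirname(inc))   # True: pair accepted

    inc, lib = "/usr/include", "/opt/hdf5/lib"
    prefix = os.path.commonprefix([inc, lib]).rstrip('/\\')
    print(prefix == os.path.dirname(inc))   # False: pair rejected

Note the sketch spells the emptiness test as ordinary equality; the diff's
"prefix is not ''" relies on string interning and would normally be written
"prefix != ''".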

https://bitbucket.org/yt_analysis/yt-3.0/commits/b67a988e966d/
Changeset:   b67a988e966d
Branch:      yt
User:        ngoldbaum
Date:        2013-07-01 19:20:54
Summary:     Merged in xarthisius/yt (pull request #534)

Search for hdf5 library/headers in default paths on posix systems. Fixes #597
Affected #:  2 files

diff -r 74891da30e86ad33ecad7614e1e10e832ff9bab2 -r b67a988e966df8f09e6783ca78f5d7050fd1b14d yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -1,102 +1,20 @@
 #!/usr/bin/env python
 import setuptools
 import os, sys, os.path, glob, \
-  tempfile, subprocess, shutil
+    tempfile, subprocess, shutil
+from yt.utilities.setup import \
+    check_for_dependencies
+
 
 def check_for_png():
-    # First up: HDF5_DIR in environment
-    if "PNG_DIR" in os.environ:
-        png_dir = os.environ["PNG_DIR"]
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: PNG_DIR: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Next up, we try png.cfg
-    elif os.path.exists("png.cfg"):
-        png_dir = open("png.cfg").read().strip()
-        png_inc = os.path.join(png_dir, "include")
-        png_lib = os.path.join(png_dir, "lib")
-        print "PNG_LOCATION: png.cfg: %s, %s" % (png_inc, png_lib)
-        return (png_inc, png_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        png_libfile = ctypes.util.find_library("png")
-        if png_libfile is not None and os.path.isfile(png_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            png_dir = os.path.dirname(os.path.dirname(png_libfile))
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for png_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-            if os.path.isdir(os.path.join(png_dir, "include")) and \
-               os.path.isfile(os.path.join(png_dir, "include", "png.h")):
-                png_inc = os.path.join(png_dir, "include")
-                png_lib = os.path.join(png_dir, "lib")
-                print "PNG_LOCATION: png found in: %s, %s" % (png_inc, png_lib)
-                return png_inc, png_lib
-    print "Reading png location from png.cfg failed."
-    print "Please place the base directory of your png install in png.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > png.cfg\" )"
-    sys.exit(1)
+    return check_for_dependencies("PNG_DIR", "png.cfg", "png.h", "png")
+
 
 def check_for_freetype():
-    # First up: environment
-    if "FTYPE_DIR" in os.environ:
-        freetype_dir = os.environ["FTYPE_DIR"]
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: FTYPE_DIR: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Next up, we try freetype.cfg
-    elif os.path.exists("freetype.cfg"):
-        freetype_dir = open("freetype.cfg").read().strip()
-        freetype_inc = os.path.join(freetype_dir, "include")
-        freetype_lib = os.path.join(freetype_dir, "lib")
-        print "FTYPE_LOCATION: freetype.cfg: %s, %s" % (freetype_inc, freetype_lib)
-        return (freetype_inc, freetype_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        freetype_libfile = ctypes.util.find_library("freetype")
-        if freetype_libfile is not None and os.path.isfile(freetype_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            freetype_dir = os.path.dirname(os.path.dirname(freetype_libfile))
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    except ImportError:
-        pass
-    # X11 is where it's located by default on OSX, although I am slightly
-    # reluctant to link against that one.
-    for freetype_dir in ["/usr/", "/usr/local/", "/usr/X11/"]:
-        if os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-            if os.path.isdir(os.path.join(freetype_dir, "include")) and \
-               os.path.isfile(os.path.join(freetype_dir, "include", "ft2build.h")):
-                freetype_inc = os.path.join(freetype_dir, "include")
-                freetype_lib = os.path.join(freetype_dir, "lib")
-                print "FTYPE_LOCATION: freetype found in: %s, %s" % (freetype_inc, freetype_lib)
-                return freetype_inc, freetype_lib
-    print "Reading freetype location from freetype.cfg failed."
-    print "Please place the base directory of your freetype install in freetype.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > freetype.cfg\" )"
-    print "You can locate this by looking for the file ft2build.h"
-    sys.exit(1)
+    return check_for_dependencies(
+        "FTYPE_DIR", "freetype.cfg", "ft2build.h", "freetype"
+    )
+
 
 def check_for_openmp():
     # Create a temporary directory
@@ -122,7 +40,7 @@
     with open(os.devnull, 'w') as fnull:
         exit_code = subprocess.call([compiler, '-fopenmp', filename],
                                     stdout=fnull, stderr=fnull)
-        
+
     # Clean up
     file.close()
     os.chdir(curdir)

diff -r 74891da30e86ad33ecad7614e1e10e832ff9bab2 -r b67a988e966df8f09e6783ca78f5d7050fd1b14d yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -1,45 +1,125 @@
 #!/usr/bin/env python
-import setuptools
-import os, sys, os.path, glob
+import os
+import sys
+import os.path
+import glob
+
+
+# snatched from PyTables
+def add_from_path(envname, dirs):
+    try:
+        dirs.extend(os.environ[envname].split(os.pathsep))
+    except KeyError:
+        pass
+
+
+# snatched from PyTables
+def add_from_flags(envname, flag_key, dirs):
+    for flag in os.environ.get(envname, "").split():
+        if flag.startswith(flag_key):
+            dirs.append(flag[len(flag_key):])
+
+
+# snatched from PyTables
+def get_default_dirs():
+    default_header_dirs = []
+    add_from_path("CPATH", default_header_dirs)
+    add_from_path("C_INCLUDE_PATH", default_header_dirs)
+    add_from_flags("CPPFLAGS", "-I", default_header_dirs)
+    default_header_dirs.extend(
+        ['/usr/include', '/usr/local/include', '/usr/X11']
+    )
+
+    default_library_dirs = []
+    add_from_flags("LDFLAGS", "-L", default_library_dirs)
+    default_library_dirs.extend(
+        os.path.join(_tree, _arch)
+        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
+        for _arch in ('lib64', 'lib')
+    )
+    return default_header_dirs, default_library_dirs
+
+
+def get_location_from_env(env):
+    env_dir = os.environ[env]
+    env_inc = os.path.join(env_dir, "include")
+    env_lib = os.path.join(env_dir, "lib")
+    print("%s_LOCATION: %s: %s, %s"
+          % (env.split('_')[0], env, env_inc, env_lib))
+    return (env_inc, env_lib)
+
+
+def get_location_from_cfg(cfg):
+    cfg_dir = open(cfg).read().strip()
+    cfg_inc = os.path.join(cfg_dir, "include")
+    cfg_lib = os.path.join(cfg_dir, "lib")
+    print("%s_LOCATION: %s: %s, %s"
+          % (cfg.split('.')[0].upper(), cfg, cfg_inc, cfg_lib))
+    return (cfg_inc, cfg_lib)
+
+
+def check_prefix(inc_dir, lib_dir):
+    prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
+    if prefix is not '' and prefix == os.path.dirname(inc_dir):
+        return (inc_dir, lib_dir)
+    else:
+        print("It seems that include prefix is different from lib prefix")
+        print("Please use either env variable or cfg to set proper path")
+        return (None, None)
+
+
+def get_location_from_ctypes(header, library):
+    try:
+        import ctypes
+        import ctypes.util
+    except ImportError:
+        return (None, None)
+
+    default_header_dirs, default_library_dirs = get_default_dirs()
+    target_inc, target_libdir = None, None
+    for inc_prefix in default_header_dirs:
+        if os.path.isfile(os.path.join(inc_prefix, header)):
+            target_inc = inc_prefix
+
+    target_libfile = ctypes.util.find_library(library)
+    if target_libfile is not None and os.path.isfile(target_libfile):
+        return check_prefix(target_inc, os.path.dirname(target_libfile))
+    for lib_dir in default_library_dirs:
+        try:
+            ctypes.CDLL(os.path.join(lib_dir, target_libfile))
+            target_libdir = lib_dir
+        except OSError:
+            pass
+    return check_prefix(target_inc, target_libdir)
+
+
+def check_for_dependencies(env, cfg, header, library):
+    # First up: check in environment
+    if env in os.environ:
+        return get_location_from_env(env)
+    # Next up, we try config file
+    elif os.path.exists(cfg):
+        return get_location_from_cfg(cfg)
+    # Now we see if ctypes can help us
+    if os.name == 'posix':
+        target_inc, target_lib = get_location_from_ctypes(header, library)
+    if None not in (target_inc, target_lib):
+        print(
+            "%s_LOCATION: %s found via ctypes in: %s, %s"
+            % (env.split('_')[0], env.split('_')[0], target_inc, target_lib)
+        )
+        return (target_inc, target_lib)
+
+    print("Reading %s location from %s failed." % (env.split('_')[0], cfg))
+    print("Please place the base directory of your")
+    print("%s install in %s and restart." % (env.split('_')[0], cfg))
+    print("(ex: \"echo '/usr/local/' > %s\" )" % cfg)
+    print("You can locate the path by looking for %s" % header)
+    sys.exit(1)
+
 
 def check_for_hdf5():
-    # First up: HDF5_DIR in environment
-    if "HDF5_DIR" in os.environ:
-        hdf5_dir = os.environ["HDF5_DIR"]
-        hdf5_inc = os.path.join(hdf5_dir, "include")
-        hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: HDF5_DIR: %s, %s" % (hdf5_inc, hdf5_lib)
-        return (hdf5_inc, hdf5_lib)
-    # Next up, we try hdf5.cfg
-    elif os.path.exists("hdf5.cfg"):
-        hdf5_dir = open("hdf5.cfg").read().strip()
-        hdf5_inc = os.path.join(hdf5_dir, "include")
-        hdf5_lib = os.path.join(hdf5_dir, "lib")
-        print "HDF5_LOCATION: hdf5.cfg: %s, %s" % (hdf5_inc, hdf5_lib)
-        return (hdf5_inc, hdf5_lib)
-    # Now we see if ctypes can help us:
-    try:
-        import ctypes.util
-        hdf5_libfile = ctypes.util.find_library("hdf5")
-        if hdf5_libfile is not None and os.path.isfile(hdf5_libfile):
-            # Now we've gotten a library, but we'll need to figure out the
-            # includes if this is going to work.  It feels like there is a
-            # better way to pull off two directory names.
-            hdf5_dir = os.path.dirname(os.path.dirname(hdf5_libfile))
-            if os.path.isdir(os.path.join(hdf5_dir, "include")) and \
-               os.path.isfile(os.path.join(hdf5_dir, "include", "hdf5.h")):
-                hdf5_inc = os.path.join(hdf5_dir, "include")
-                hdf5_lib = os.path.join(hdf5_dir, "lib")
-                print "HDF5_LOCATION: HDF5 found in: %s, %s" % (hdf5_inc,
-                    hdf5_lib)
-                return hdf5_inc, hdf5_lib
-    except ImportError:
-        pass
-    print "Reading HDF5 location from hdf5.cfg failed."
-    print "Please place the base directory of your"
-    print "HDF5 install in hdf5.cfg and restart."
-    print "(ex: \"echo '/usr/local/' > hdf5.cfg\" )"
-    sys.exit(1)
+    return check_for_dependencies("HDF5_DIR", "hdf5.cfg", "hdf5.h", "hdf5")
 
 
 def configuration(parent_package='', top_path=None):
@@ -55,22 +135,23 @@
     config.add_subpackage("parallel_tools")
     config.add_subpackage("lib")
     config.add_extension("data_point_utilities",
-                "yt/utilities/data_point_utilities.c", libraries=["m"])
+                         "yt/utilities/data_point_utilities.c",
+                         libraries=["m"])
     config.add_subpackage("tests")
     hdf5_inc, hdf5_lib = check_for_hdf5()
     include_dirs = [hdf5_inc]
     library_dirs = [hdf5_lib]
     config.add_extension("hdf5_light_reader",
-                        "yt/utilities/hdf5_light_reader.c",
+                         "yt/utilities/hdf5_light_reader.c",
                          define_macros=[("H5_USE_16_API", True)],
                          libraries=["m", "hdf5"],
                          library_dirs=library_dirs, include_dirs=include_dirs)
     config.add_extension("libconfig_wrapper",
-        ["yt/utilities/libconfig_wrapper.pyx"] +
-         glob.glob("yt/utilities/_libconfig/*.c"),
-        include_dirs=["yt/utilities/_libconfig/"],
-        define_macros=[("HAVE_XLOCALE_H", True)]
-        )
+                         ["yt/utilities/libconfig_wrapper.pyx"] +
+                         glob.glob("yt/utilities/_libconfig/*.c"),
+                         include_dirs=["yt/utilities/_libconfig/"],
+                         define_macros=[("HAVE_XLOCALE_H", True)]
+                         )
     config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
+    # config.make_svn_version_py()
     return config


https://bitbucket.org/yt_analysis/yt-3.0/commits/af95c721b2e0/
Changeset:   af95c721b2e0
Branch:      yt
User:        ngoldbaum
Date:        2013-07-02 20:03:43
Summary:     Ensuring that castro domain_dimensions is a numpy array.
Affected #:  1 file

diff -r b67a988e966df8f09e6783ca78f5d7050fd1b14d -r af95c721b2e0cf1b6b795df344b818c6a6c2d988 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -608,7 +608,7 @@
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
         self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = self.parameters["TopGridDimensions"]
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
         self.refine_by = self.parameters.get("RefineBy", 2)
 
         if (self.parameters.has_key("ComovingCoordinates") and

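The cast matters because downstream code does elementwise arithmetic on
domain_dimensions; a plain Python list repeats instead of multiplying.  A
two-line illustration:

    import numpy as np
    dims = [16, 16, 16]
    print(np.array(dims) * 2)   # [32 32 32] -- elementwise
    print(dims * 2)             # [16, 16, 16, 16, 16, 16] -- list repetition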

https://bitbucket.org/yt_analysis/yt-3.0/commits/f07ee507cefe/
Changeset:   f07ee507cefe
Branch:      yt
User:        nscudder
Date:        2013-07-03 22:58:38
Summary:     Fixed an issue in volume rendering through the command line where the camera object would default to 'Density' instead of using the supplied field.
Affected #:  1 file

diff -r af95c721b2e0cf1b6b795df344b818c6a6c2d988 -r f07ee507cefed300665d0c39435245a9fc299ddf yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1401,7 +1401,7 @@
         tf = ColorTransferFunction((mi-2, ma+2))
         tf.add_layers(n_contours,w=contour_width,col_bounds = (mi,ma), colormap=cmap)
 
-        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
+        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf, fields=[field])
         image = cam.snapshot()
 
         if args.enhance:


https://bitbucket.org/yt_analysis/yt-3.0/commits/0486b1e10e4f/
Changeset:   0486b1e10e4f
Branch:      yt
User:        samskillman
Date:        2013-07-01 17:27:29
Summary:     Quick fix to keep map_to_colormap from hitting an out-of-bounds error.
Affected #:  1 file

diff -r 74891da30e86ad33ecad7614e1e10e832ff9bab2 -r 0486b1e10e4f0cfd0bcc10c92562ec106dae3d8d yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -641,6 +641,8 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
+        rel0 = max(rel0, 0)
+        rel1 = min(rel1, self.nbins-1)
         tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)

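The clamp keeps the computed bin indices inside [0, nbins-1] when the
requested (mi, ma) interval extends past the transfer function's x_bounds;
without it, the colormap rows would be indexed out of range.  In isolation,
with invented numbers:

    nbins = 256
    x_bounds = (0.0, 1.0)
    mi, ma = -0.5, 1.5   # interval wider than x_bounds

    rel0 = int(nbins*(mi - x_bounds[0])/(x_bounds[1] - x_bounds[0]))
    rel1 = int(nbins*(ma - x_bounds[0])/(x_bounds[1] - x_bounds[0]))
    print(rel0, rel1)    # -128 384 -- out of bounds
    rel0 = max(rel0, 0)
    rel1 = min(rel1, nbins - 1)
    print(rel0, rel1)    # 0 255 -- safely clamped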

https://bitbucket.org/yt_analysis/yt-3.0/commits/1b51d03f4df7/
Changeset:   1b51d03f4df7
Branch:      yt
User:        ngoldbaum
Date:        2013-07-04 03:14:17
Summary:     Merged in samskillman/yt (pull request #540)

Quick fix to not let map_to_colormap get an out of bounds error.
Affected #:  1 file

diff -r f07ee507cefed300665d0c39435245a9fc299ddf -r 1b51d03f4df7744e1bf44c0d429dd876785164a0 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -641,6 +641,8 @@
             self.x_bounds[0]))
         rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
             self.x_bounds[0]))
+        rel0 = max(rel0, 0)
+        rel1 = min(rel1, self.nbins-1)
         tomap = np.linspace(0.,1.,num=rel1-rel0)
         cmap = get_cmap(colormap)
         cc = cmap(tomap)


https://bitbucket.org/yt_analysis/yt-3.0/commits/76b555d4075c/
Changeset:   76b555d4075c
Branch:      yt
User:        xarthisius
Date:        2013-07-04 08:35:20
Summary:     Don't try to import distribute if setuptools>=0.7 is detected
Affected #:  1 file

diff -r 1b51d03f4df7744e1bf44c0d429dd876785164a0 -r 76b555d4075c708d15d5e157921dcd4fc80f364c setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
 import subprocess
 import shutil
 import glob
-import distribute_setup
-distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+    import distribute_setup
+    distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
 # End snippet
 ######
 
-import setuptools
-
 VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):

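The version guard works because distribute reported itself as setuptools
0.6.x, while the reunified setuptools releases resumed at 0.7; comparing
StrictVersion objects rather than raw strings gets the numeric ordering
right.  For example:

    from distutils.version import StrictVersion

    print(StrictVersion('0.6.49') < StrictVersion('0.7.0'))   # True
    # Plain string comparison would misorder e.g. '0.10.0' vs '0.7.0'.
    print(StrictVersion('0.10.0') < StrictVersion('0.7.0'))   # False
    print('0.10.0' < '0.7.0')                                 # True (wrong)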

https://bitbucket.org/yt_analysis/yt-3.0/commits/213d534593fb/
Changeset:   213d534593fb
Branch:      yt
User:        ngoldbaum
Date:        2013-07-06 06:55:11
Summary:     Fixing a unit issue in universal_fields.py.
Affected #:  1 file

diff -r 76b555d4075c708d15d5e157921dcd4fc80f364c -r 213d534593fb056814ce918c902e4fc3ea3360e9 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1415,7 +1415,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],

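The fix is plain vector arithmetic: the magnitude of
(domegax_dt, domegay_dt, domegaz_dt) is sqrt(x**2 + y**2 + z**2), and the
old expression dropped the square on the z component.  With made-up numbers:

    import numpy as np
    x, y, z = 3.0, 4.0, 12.0
    print(np.sqrt(x**2 + y**2 + z**2))   # 13.0 -- correct magnitude
    print(np.sqrt(x**2 + y**2 + z))      # ~6.08 -- the buggy expression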

https://bitbucket.org/yt_analysis/yt-3.0/commits/d02e199b5586/
Changeset:   d02e199b5586
Branch:      yt
User:        jsoishi
Date:        2013-07-06 18:30:19
Summary:     backported fortran_utils from 3.0
Affected #:  1 file

diff -r 213d534593fb056814ce918c902e4fc3ea3360e9 -r d02e199b558656dfa594e510f00697de0a738220 yt/utilities/fortran_utils.py
--- /dev/null
+++ b/yt/utilities/fortran_utils.py
@@ -0,0 +1,243 @@
+"""
+Utilities for reading Fortran files.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import struct
+import numpy as np
+import os
+
+def read_attrs(f, attrs,endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    according to a definition of attributes, returning a dictionary.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.  Note that this
+    function is used for reading sequentially-written records.  If you have
+    many values that were written simultaneously, see read_record.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    attrs : iterable of iterables
+        This object should be an iterable of one of the formats: 
+        [ (attr_name, count, struct type), ... ].
+        [ ((name1,name2,name3),count, vector type]
+        [ ((name1,name2,name3),count, 'type type type']
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_attrs(f, header)
+    """
+    vv = {}
+    net_format = endian
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        net_format += "".join(["I"] + ([t] * n) + ["I"])
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vv = {}
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        if type(a)==tuple:
+            n = len(a)
+        s1 = vals.pop(0)
+        v = [vals.pop(0) for i in range(n)]
+        s2 = vals.pop(0)
+        if s1 != s2:
+            size = struct.calcsize(endian + "I" + "".join(n*[t]) + "I")
+        assert(s1 == s2)
+        if n == 1: v = v[0]
+        if type(a)==tuple:
+            assert len(a) == len(v)
+            for k,val in zip(a,v):
+                vv[k]=val
+        else:
+            vv[a] = v
+    return vv
+
+def read_vector(f, d, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a vector of values.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    d : data type
+        This is the datatype (from the struct module) that we should read.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    tr : numpy.ndarray
+        This is the vector of values read from the file.
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_vector(f, 'd')
+    """
+    pad_fmt = "%sI" % (endian)
+    pad_size = struct.calcsize(pad_fmt)
+    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
+    vec_fmt = "%s%s" % (endian, d)
+    vec_size = struct.calcsize(vec_fmt)
+    if vec_len % vec_size != 0:
+        print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
+        raise RuntimeError
+    vec_num = vec_len / vec_size
+    if isinstance(f, file): # Needs to be explicitly a file
+        tr = np.fromfile(f, vec_fmt, count=vec_num)
+    else:
+        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
+    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
+    assert(vec_len == vec_len2)
+    return tr
+
+def skip(f, n=1, endian='='):
+    r"""This function accepts a file pointer and skips a Fortran unformatted
+    record. Optionally check that the skip was done correctly by checking 
+    the pad bytes.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    n : int
+        Number of records to skip.
+    check : bool
+        Assert that the pad bytes are equal
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    skipped: The number of elements in the skipped array
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> skip(f, 3)
+    """
+    skipped = []
+    pos = f.tell()
+    for i in range(n):
+        fmt = endian+"I"
+        size = f.read(struct.calcsize(fmt))
+        s1= struct.unpack(fmt, size)[0]
+        f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
+        s2= struct.unpack(fmt, size)[0]
+        assert s1==s2 
+        skipped.append(s1/struct.calcsize(fmt))
+    return skipped
+
+def peek_record_size(f,endian='='):
+    r""" This function accept the file handle and returns
+    the size of the next record and then rewinds the file
+    to the previous position.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    Number of bytes in the next record
+    """
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def read_record(f, rspec, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a single "record" with different components.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    rspec : iterable of iterables
+        This object should be an iterable of the format [ (attr_name, count,
+        struct type), ... ].
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_record(f, header)
+    """
+    vv = {}
+    net_format = endian + "I"
+    for a, n, t in rspec:
+        t = t if len(t)==1 else t[-1]
+        net_format += "%s%s"%(n, t)
+    net_format += "I"
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vvv = vals[:]
+    s1, s2 = vals.pop(0), vals.pop(-1)
+    if s1 != s2:
+        print "S1 = %s ; S2 = %s ; SIZE = %s"
+        raise RuntimeError
+    pos = 0
+    for a, n, t in rspec:
+        vv[a] = vals[pos:pos+n]
+        pos += n
+    return vv
+
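
Two review notes on this module before it is removed and re-added below: the error branch in read_vector formats its message with fmt, length and size, names that do not exist in that scope (the locals are vec_fmt, vec_len and vec_size), so a length mismatch raises NameError rather than printing and raising the intended RuntimeError; the similar print in read_record never applies its format arguments at all; and peek_record_size unpacks with a hard-coded '>i', silently ignoring its endian argument. For readers new to the format these helpers parse, every Fortran unformatted record is framed fore and aft by its payload size in bytes. A small writer sketch (write_fortran_record is a hypothetical helper, paired with the read_attrs docstring example):

    import struct

    def write_fortran_record(f, fmt, *values):
        # One Fortran unformatted record: [int32 nbytes][payload][int32 nbytes]
        payload = struct.pack(fmt, *values)
        pad = struct.pack('=I', len(payload))
        f.write(pad + payload + pad)

    with open('fort.3', 'wb') as f:
        write_fortran_record(f, '=i', 16)     # "ncpu": one int per record
        write_fortran_record(f, '=2i', 4, 8)  # "nfiles": two ints in one record

    # read_attrs(open('fort.3', 'rb'), [("ncpu", 1, "i"), ("nfiles", 2, "i")])
    # would then return {'ncpu': 16, 'nfiles': [4, 8]}.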


https://bitbucket.org/yt_analysis/yt-3.0/commits/a9834c229e60/
Changeset:   a9834c229e60
Branch:      yt
User:        jsoishi
Date:        2013-07-06 19:29:10
Summary:     removing fortran utils, so I can preserve history.
Affected #:  1 file

diff -r d02e199b558656dfa594e510f00697de0a738220 -r a9834c229e603242f63093a62f894839a3fb8cad yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ /dev/null
@@ -1,243 +0,0 @@
-"""
-Utilities for reading Fortran files.
-
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: Columbia University
-Homepage: http://yt.enzotools.org/
-License:
-  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
-
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
-
-import struct
-import numpy as np
-import os
-
-def read_attrs(f, attrs,endian='='):
-    r"""This function accepts a file pointer and reads from that file pointer
-    according to a definition of attributes, returning a dictionary.
-
-    Fortran unformatted files provide total bytesize at the beginning and end
-    of a record.  By correlating the components of that record with attribute
-    names, we construct a dictionary that gets returned.  Note that this
-    function is used for reading sequentially-written records.  If you have
-    many values that were written simultaneously, see read_record.
-
-    Parameters
-    ----------
-    f : File object
-        An open file object.  Should have been opened in mode rb.
-    attrs : iterable of iterables
-        This object should be an iterable of one of the formats: 
-        [ (attr_name, count, struct type), ... ].
-        [ ((name1,name2,name3),count, vector type]
-        [ ((name1,name2,name3),count, 'type type type']
-    endian : str
-        '=' is native, '>' is big, '<' is little endian
-
-    Returns
-    -------
-    values : dict
-        This will return a dict of iterables of the components of the values in
-        the file.
-
-    Examples
-    --------
-
-    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
-    >>> f = open("fort.3", "rb")
-    >>> rv = read_attrs(f, header)
-    """
-    vv = {}
-    net_format = endian
-    for a, n, t in attrs:
-        for end in '@=<>':
-            t = t.replace(end,'')
-        net_format += "".join(["I"] + ([t] * n) + ["I"])
-    size = struct.calcsize(net_format)
-    vals = list(struct.unpack(net_format, f.read(size)))
-    vv = {}
-    for a, n, t in attrs:
-        for end in '@=<>':
-            t = t.replace(end,'')
-        if type(a)==tuple:
-            n = len(a)
-        s1 = vals.pop(0)
-        v = [vals.pop(0) for i in range(n)]
-        s2 = vals.pop(0)
-        if s1 != s2:
-            size = struct.calcsize(endian + "I" + "".join(n*[t]) + "I")
-        assert(s1 == s2)
-        if n == 1: v = v[0]
-        if type(a)==tuple:
-            assert len(a) == len(v)
-            for k,val in zip(a,v):
-                vv[k]=val
-        else:
-            vv[a] = v
-    return vv
-
-def read_vector(f, d, endian='='):
-    r"""This function accepts a file pointer and reads from that file pointer
-    a vector of values.
-
-    Parameters
-    ----------
-    f : File object
-        An open file object.  Should have been opened in mode rb.
-    d : data type
-        This is the datatype (from the struct module) that we should read.
-    endian : str
-        '=' is native, '>' is big, '<' is little endian
-
-    Returns
-    -------
-    tr : numpy.ndarray
-        This is the vector of values read from the file.
-
-    Examples
-    --------
-
-    >>> f = open("fort.3", "rb")
-    >>> rv = read_vector(f, 'd')
-    """
-    pad_fmt = "%sI" % (endian)
-    pad_size = struct.calcsize(pad_fmt)
-    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
-    vec_fmt = "%s%s" % (endian, d)
-    vec_size = struct.calcsize(vec_fmt)
-    if vec_len % vec_size != 0:
-        print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
-        raise RuntimeError
-    vec_num = vec_len / vec_size
-    if isinstance(f, file): # Needs to be explicitly a file
-        tr = np.fromfile(f, vec_fmt, count=vec_num)
-    else:
-        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
-    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
-    assert(vec_len == vec_len2)
-    return tr
-
-def skip(f, n=1, endian='='):
-    r"""This function accepts a file pointer and skips a Fortran unformatted
-    record. Optionally check that the skip was done correctly by checking 
-    the pad bytes.
-
-    Parameters
-    ----------
-    f : File object
-        An open file object.  Should have been opened in mode rb.
-    n : int
-        Number of records to skip.
-    check : bool
-        Assert that the pad bytes are equal
-    endian : str
-        '=' is native, '>' is big, '<' is little endian
-
-    Returns
-    -------
-    skipped: The number of elements in the skipped array
-
-    Examples
-    --------
-
-    >>> f = open("fort.3", "rb")
-    >>> skip(f, 3)
-    """
-    skipped = []
-    pos = f.tell()
-    for i in range(n):
-        fmt = endian+"I"
-        size = f.read(struct.calcsize(fmt))
-        s1= struct.unpack(fmt, size)[0]
-        f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
-        s2= struct.unpack(fmt, size)[0]
-        assert s1==s2 
-        skipped.append(s1/struct.calcsize(fmt))
-    return skipped
-
-def peek_record_size(f,endian='='):
-    r""" This function accept the file handle and returns
-    the size of the next record and then rewinds the file
-    to the previous position.
-
-    Parameters
-    ----------
-    f : File object
-        An open file object.  Should have been opened in mode rb.
-    endian : str
-        '=' is native, '>' is big, '<' is little endian
-
-    Returns
-    -------
-    Number of bytes in the next record
-    """
-    pos = f.tell()
-    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
-    f.seek(pos)
-    return s[0]
-
-def read_record(f, rspec, endian='='):
-    r"""This function accepts a file pointer and reads from that file pointer
-    a single "record" with different components.
-
-    Fortran unformatted files provide total bytesize at the beginning and end
-    of a record.  By correlating the components of that record with attribute
-    names, we construct a dictionary that gets returned.
-
-    Parameters
-    ----------
-    f : File object
-        An open file object.  Should have been opened in mode rb.
-    rspec : iterable of iterables
-        This object should be an iterable of the format [ (attr_name, count,
-        struct type), ... ].
-    endian : str
-        '=' is native, '>' is big, '<' is little endian
-
-    Returns
-    -------
-    values : dict
-        This will return a dict of iterables of the components of the values in
-        the file.
-
-    Examples
-    --------
-
-    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
-    >>> f = open("fort.3", "rb")
-    >>> rv = read_record(f, header)
-    """
-    vv = {}
-    net_format = endian + "I"
-    for a, n, t in rspec:
-        t = t if len(t)==1 else t[-1]
-        net_format += "%s%s"%(n, t)
-    net_format += "I"
-    size = struct.calcsize(net_format)
-    vals = list(struct.unpack(net_format, f.read(size)))
-    vvv = vals[:]
-    s1, s2 = vals.pop(0), vals.pop(-1)
-    if s1 != s2:
-        print "S1 = %s ; S2 = %s ; SIZE = %s"
-        raise RuntimeError
-    pos = 0
-    for a, n, t in rspec:
-        vv[a] = vals[pos:pos+n]
-        pos += n
-    return vv
-


https://bitbucket.org/yt_analysis/yt-3.0/commits/5234f53536d4/
Changeset:   5234f53536d4
Branch:      yt
User:        jsoishi
Date:        2013-07-06 19:31:13
Summary:     backporting fortran_utils.py from yt-3.0
Affected #:  1 file

diff -r a9834c229e603242f63093a62f894839a3fb8cad -r 5234f53536d4de62f1ba40afc68d7f6e2091bd7f yt/utilities/fortran_utils.py
--- /dev/null
+++ b/yt/utilities/fortran_utils.py
@@ -0,0 +1,243 @@
+"""
+Utilities for reading Fortran files.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import struct
+import numpy as np
+import os
+
+def read_attrs(f, attrs,endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    according to a definition of attributes, returning a dictionary.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.  Note that this
+    function is used for reading sequentially-written records.  If you have
+    many values that were written simultaneously, see read_record.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    attrs : iterable of iterables
+        This object should be an iterable of one of the formats: 
+        [ (attr_name, count, struct type), ... ].
+        [ ((name1,name2,name3),count, vector type]
+        [ ((name1,name2,name3),count, 'type type type']
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_attrs(f, header)
+    """
+    vv = {}
+    net_format = endian
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        net_format += "".join(["I"] + ([t] * n) + ["I"])
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vv = {}
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        if type(a)==tuple:
+            n = len(a)
+        s1 = vals.pop(0)
+        v = [vals.pop(0) for i in range(n)]
+        s2 = vals.pop(0)
+        if s1 != s2:
+            size = struct.calcsize(endian + "I" + "".join(n*[t]) + "I")
+        assert(s1 == s2)
+        if n == 1: v = v[0]
+        if type(a)==tuple:
+            assert len(a) == len(v)
+            for k,val in zip(a,v):
+                vv[k]=val
+        else:
+            vv[a] = v
+    return vv
+
+def read_vector(f, d, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a vector of values.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    d : data type
+        This is the datatype (from the struct module) that we should read.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    tr : numpy.ndarray
+        This is the vector of values read from the file.
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_vector(f, 'd')
+    """
+    pad_fmt = "%sI" % (endian)
+    pad_size = struct.calcsize(pad_fmt)
+    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
+    vec_fmt = "%s%s" % (endian, d)
+    vec_size = struct.calcsize(vec_fmt)
+    if vec_len % vec_size != 0:
+        print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
+        raise RuntimeError
+    vec_num = vec_len / vec_size
+    if isinstance(f, file): # Needs to be explicitly a file
+        tr = np.fromfile(f, vec_fmt, count=vec_num)
+    else:
+        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
+    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
+    assert(vec_len == vec_len2)
+    return tr
+
+def skip(f, n=1, endian='='):
+    r"""This function accepts a file pointer and skips a Fortran unformatted
+    record. Optionally check that the skip was done correctly by checking 
+    the pad bytes.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    n : int
+        Number of records to skip.
+    check : bool
+        Assert that the pad bytes are equal
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    skipped: The number of elements in the skipped array
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> skip(f, 3)
+    """
+    skipped = 0
+    pos = f.tell()
+    for i in range(n):
+        fmt = endian+"I"
+        size = f.read(struct.calcsize(fmt))
+        s1= struct.unpack(fmt, size)[0]
+        f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
+        s2= struct.unpack(fmt, size)[0]
+        assert s1==s2 
+        skipped += s1/struct.calcsize(fmt)
+    return skipped
+
+def peek_record_size(f,endian='='):
+    r""" This function accept the file handle and returns
+    the size of the next record and then rewinds the file
+    to the previous position.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    Number of bytes in the next record
+    """
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def read_record(f, rspec, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a single "record" with different components.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    rspec : iterable of iterables
+        This object should be an iterable of the format [ (attr_name, count,
+        struct type), ... ].
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_record(f, header)
+    """
+    vv = {}
+    net_format = endian + "I"
+    for a, n, t in rspec:
+        t = t if len(t)==1 else t[-1]
+        net_format += "%s%s"%(n, t)
+    net_format += "I"
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vvv = vals[:]
+    s1, s2 = vals.pop(0), vals.pop(-1)
+    if s1 != s2:
+        print "S1 = %s ; S2 = %s ; SIZE = %s"
+        raise RuntimeError
+    pos = 0
+    for a, n, t in rspec:
+        vv[a] = vals[pos:pos+n]
+        pos += n
+    return vv
+
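
This re-add differs from the version just removed in one visible way: skip() now accumulates a single integer word count (skipped = 0; skipped += ...) rather than returning a list with one entry per record. (Its docstring still documents a check flag that the signature does not accept.) A usage sketch, under the assumption that fort.3 was written by the framing example given earlier:

    from yt.utilities.fortran_utils import skip

    with open('fort.3', 'rb') as f:
        words = skip(f, n=2)   # skip both records
    # words == 3: one 4-byte word in the first record, two in the second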


https://bitbucket.org/yt_analysis/yt-3.0/commits/9ff8e4bcaa3e/
Changeset:   9ff8e4bcaa3e
Branch:      yt
User:        xarthisius
Date:        2013-07-06 11:36:45
Summary:     Correct dds in covering grid for reduced dimensions in AMR data. Fixes #602
Affected #:  1 file

diff -r 76b555d4075c708d15d5e157921dcd4fc80f364c -r 9ff8e4bcaa3e051fbcabc0bab914e67c23be2c47 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3703,7 +3703,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones
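
The extra line matters for AMR datasets with a degenerate axis: previously rdx was refined even along a dimension only one cell wide, collapsing dds (and hence right_edge) on that axis. A standalone numerical sketch of the corrected computation, with made-up inputs rather than a real parameter file:

    import numpy as np

    domain_dimensions = np.array([64, 64, 1])   # effectively 2D data
    domain_width = np.array([1.0, 1.0, 1.0])
    dims = np.array([512, 512, 1])
    refine_by, level, num_ghost_zones = 2, 3, 0

    rdx = domain_dimensions * refine_by**level            # [512, 512, 8]
    rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1    # flat axis stays unrefined
    dds = domain_width / rdx.astype("float64")
    # dds -> [1/512, 1/512, 1.0] rather than [1/512, 1/512, 0.125]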


https://bitbucket.org/yt_analysis/yt-3.0/commits/c3e296975053/
Changeset:   c3e296975053
Branch:      yt
User:        MatthewTurk
Date:        2013-07-08 15:15:21
Summary:     Merged in xarthisius/yt (pull request #546)

Correct dds in covering grid for reduced dimensions in AMR data. Fixes #602
Affected #:  1 file

diff -r 5234f53536d4de62f1ba40afc68d7f6e2091bd7f -r c3e296975053621f936fba611bea53b714fa2ae5 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3703,7 +3703,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones


https://bitbucket.org/yt_analysis/yt-3.0/commits/a022794fbc22/
Changeset:   a022794fbc22
Branch:      yt
User:        xarthisius
Date:        2013-07-04 12:59:50
Summary:     Sanitize library searching to take multiarch into account. Always rely on deps installed via script if YT_DEST is set
Affected #:  1 file

diff -r 1b51d03f4df7744e1bf44c0d429dd876785164a0 -r a022794fbc224507781e06a8ed7a66773f4d8b7b yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -3,6 +3,7 @@
 import sys
 import os.path
 import glob
+import platform
 
 
 # snatched from PyTables
@@ -23,6 +24,8 @@
 # snatched from PyTables
 def get_default_dirs():
     default_header_dirs = []
+    default_library_dirs = []
+
     add_from_path("CPATH", default_header_dirs)
     add_from_path("C_INCLUDE_PATH", default_header_dirs)
     add_from_flags("CPPFLAGS", "-I", default_header_dirs)
@@ -30,12 +33,17 @@
         ['/usr/include', '/usr/local/include', '/usr/X11']
     )
 
-    default_library_dirs = []
+    _archs = ['lib64', 'lib']
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            _archs.extend(['lib/x86_64-linux-gnu', 'lib/i686-linux-gnu'])
+
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
         os.path.join(_tree, _arch)
-        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
-        for _arch in ('lib64', 'lib')
+        for _tree in ('/usr', '/usr/local', '/usr/X11', '/')
+        for _arch in _archs
     )
     return default_header_dirs, default_library_dirs
 
@@ -59,6 +67,14 @@
 
 
 def check_prefix(inc_dir, lib_dir):
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            print("Since you are using multiarch distro it's hard to detect")
+            print("whether library mathes the header file. We will assume")
+            print("it does. If you encounter any build failures please use")
+            print("proper cfg files to provide path to the dependencies")
+            return (inc_dir, lib_dir)
     prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
     if prefix is not '' and prefix == os.path.dirname(inc_dir):
         return (inc_dir, lib_dir)
@@ -69,20 +85,29 @@
 
 
 def get_location_from_ctypes(header, library):
+    yt_inst = os.environ.get('YT_DEST')
+    if yt_inst is not None:
+        # since we preffer installation via scirpt, make sure
+        # that YT_DEST path take precedence above all else
+        return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
+
     try:
         import ctypes
         import ctypes.util
     except ImportError:
         return (None, None)
 
+    target_inc, target_libdir = None, None
     default_header_dirs, default_library_dirs = get_default_dirs()
-    target_inc, target_libdir = None, None
     for inc_prefix in default_header_dirs:
         if os.path.isfile(os.path.join(inc_prefix, header)):
             target_inc = inc_prefix
 
     target_libfile = ctypes.util.find_library(library)
-    if target_libfile is not None and os.path.isfile(target_libfile):
+    if None in (target_inc, target_libfile):
+        # either header or lib was not found, abort now
+        return (None, None)
+    if os.path.isfile(target_libfile):
         return check_prefix(target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:
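
Background for the diff above: Debian-style multiarch moved shared libraries from /usr/lib into triplet subdirectories such as /usr/lib/x86_64-linux-gnu, which the old ('lib64', 'lib') pairs never visited, and the new YT_DEST short-circuit returns ($YT_DEST/include, $YT_DEST/lib) before any searching happens. A condensed sketch of the search list being built (platform.linux_distribution() existed in the Python of the era; it was later removed in Python 3.8):

    import os
    import platform

    archs = ['lib64', 'lib']
    if platform.system() == 'Linux':
        distname = platform.linux_distribution()[0]
        if distname in ('Ubuntu', 'Debian'):
            archs.extend(['lib/x86_64-linux-gnu', 'lib/i686-linux-gnu'])

    library_dirs = [os.path.join(tree, arch)
                    for tree in ('/usr', '/usr/local', '/usr/X11', '/')
                    for arch in archs]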


https://bitbucket.org/yt_analysis/yt-3.0/commits/7239377a88f8/
Changeset:   7239377a88f8
Branch:      yt
User:        xarthisius
Date:        2013-07-04 20:32:35
Summary:     Add multiarch default dir for i386
Affected #:  1 file

diff -r a022794fbc224507781e06a8ed7a66773f4d8b7b -r 7239377a88f83e148a55dd3c0208b12ac6e47678 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -37,7 +37,8 @@
     if platform.system() == 'Linux':
         distname, version, did = platform.linux_distribution()
         if distname in ('Ubuntu', 'Debian'):
-            _archs.extend(['lib/x86_64-linux-gnu', 'lib/i686-linux-gnu'])
+            _archs.extend(
+                ['lib/x86_64-linux-gnu', 'lib/i686-linux-gnu', 'lib/i386-linux-gnu'])
 
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(


https://bitbucket.org/yt_analysis/yt-3.0/commits/389d3baba45a/
Changeset:   389d3baba45a
Branch:      yt
User:        xarthisius
Date:        2013-07-05 08:35:24
Summary:     Fix spelling errors, pep8
Affected #:  1 file

diff -r 7239377a88f83e148a55dd3c0208b12ac6e47678 -r 389d3baba45a79bf07638f072589ef8f63370968 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -38,7 +38,10 @@
         distname, version, did = platform.linux_distribution()
         if distname in ('Ubuntu', 'Debian'):
             _archs.extend(
-                ['lib/x86_64-linux-gnu', 'lib/i686-linux-gnu', 'lib/i386-linux-gnu'])
+                ['lib/x86_64-linux-gnu',
+                 'lib/i686-linux-gnu',
+                 'lib/i386-linux-gnu']
+            )
 
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
@@ -72,7 +75,7 @@
         distname, version, did = platform.linux_distribution()
         if distname in ('Ubuntu', 'Debian'):
             print("Since you are using multiarch distro it's hard to detect")
-            print("whether library mathes the header file. We will assume")
+            print("whether library matches the header file. We will assume")
             print("it does. If you encounter any build failures please use")
             print("proper cfg files to provide path to the dependencies")
             return (inc_dir, lib_dir)
@@ -88,7 +91,7 @@
 def get_location_from_ctypes(header, library):
     yt_inst = os.environ.get('YT_DEST')
     if yt_inst is not None:
-        # since we preffer installation via scirpt, make sure
+        # since we prefer installation via script, make sure
         # that YT_DEST path take precedence above all else
         return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/432d2a1883d2/
Changeset:   432d2a1883d2
Branch:      yt
User:        MatthewTurk
Date:        2013-07-08 15:16:33
Summary:     Merged in xarthisius/yt (pull request #543)

Sanitize library searching to take multiarch into account. Always rely on deps installed via script if YT_DEST is set
Affected #:  1 file

diff -r c3e296975053621f936fba611bea53b714fa2ae5 -r 432d2a1883d262ad9ada8d98dd36fbe638fa45b1 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -3,6 +3,7 @@
 import sys
 import os.path
 import glob
+import platform
 
 
 # snatched from PyTables
@@ -23,6 +24,8 @@
 # snatched from PyTables
 def get_default_dirs():
     default_header_dirs = []
+    default_library_dirs = []
+
     add_from_path("CPATH", default_header_dirs)
     add_from_path("C_INCLUDE_PATH", default_header_dirs)
     add_from_flags("CPPFLAGS", "-I", default_header_dirs)
@@ -30,12 +33,21 @@
         ['/usr/include', '/usr/local/include', '/usr/X11']
     )
 
-    default_library_dirs = []
+    _archs = ['lib64', 'lib']
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            _archs.extend(
+                ['lib/x86_64-linux-gnu',
+                 'lib/i686-linux-gnu',
+                 'lib/i386-linux-gnu']
+            )
+
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
         os.path.join(_tree, _arch)
-        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
-        for _arch in ('lib64', 'lib')
+        for _tree in ('/usr', '/usr/local', '/usr/X11', '/')
+        for _arch in _archs
     )
     return default_header_dirs, default_library_dirs
 
@@ -59,6 +71,14 @@
 
 
 def check_prefix(inc_dir, lib_dir):
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            print("Since you are using multiarch distro it's hard to detect")
+            print("whether library matches the header file. We will assume")
+            print("it does. If you encounter any build failures please use")
+            print("proper cfg files to provide path to the dependencies")
+            return (inc_dir, lib_dir)
     prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
     if prefix is not '' and prefix == os.path.dirname(inc_dir):
         return (inc_dir, lib_dir)
@@ -69,20 +89,29 @@
 
 
 def get_location_from_ctypes(header, library):
+    yt_inst = os.environ.get('YT_DEST')
+    if yt_inst is not None:
+        # since we prefer installation via script, make sure
+        # that YT_DEST path take precedence above all else
+        return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
+
     try:
         import ctypes
         import ctypes.util
     except ImportError:
         return (None, None)
 
+    target_inc, target_libdir = None, None
     default_header_dirs, default_library_dirs = get_default_dirs()
-    target_inc, target_libdir = None, None
     for inc_prefix in default_header_dirs:
         if os.path.isfile(os.path.join(inc_prefix, header)):
             target_inc = inc_prefix
 
     target_libfile = ctypes.util.find_library(library)
-    if target_libfile is not None and os.path.isfile(target_libfile):
+    if None in (target_inc, target_libfile):
+        # either header or lib was not found, abort now
+        return (None, None)
+    if os.path.isfile(target_libfile):
         return check_prefix(target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:


https://bitbucket.org/yt_analysis/yt-3.0/commits/dc578c93d942/
Changeset:   dc578c93d942
Branch:      yt
User:        ngoldbaum
Date:        2013-06-30 11:32:59
Summary:     Fixing a comment in physical_constants.py
Affected #:  1 file

diff -r 62e723e2f60c980f48fca5f76f5fdd8862945b98 -r dc578c93d942e2c892831edaeedfb11803467096 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,7 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
+rho_crit_now = 1.8788e-29  # g/cm^3/h^2 (critical mass for closure, Cosmology)
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22


https://bitbucket.org/yt_analysis/yt-3.0/commits/7678a3f41321/
Changeset:   7678a3f41321
Branch:      yt
User:        ngoldbaum
Date:        2013-06-30 11:41:22
Summary:     Fixing the explanatory comment as well.
Affected #:  1 file

diff -r dc578c93d942e2c892831edaeedfb11803467096 -r 7678a3f413216898b2628c51556ade125b2d3db8 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,7 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g/cm^3/h^2 (critical mass for closure, Cosmology)
+rho_crit_now = 1.8788e-29  # g/cm^3/h^2 (cosmological critical density)
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22


https://bitbucket.org/yt_analysis/yt-3.0/commits/e57c0ae480c4/
Changeset:   e57c0ae480c4
Branch:      yt
User:        ngoldbaum
Date:        2013-07-01 20:35:02
Summary:     Adding some explanatory text for the units of rho_crit_now.
Affected #:  1 file

diff -r 7678a3f413216898b2628c51556ade125b2d3db8 -r e57c0ae480c40fa300153836be43b7d47b548d32 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,13 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g/cm^3/h^2 (cosmological critical density)
+# The following value was calculated assuming H = 100 km/s/Mpc.
+# To get the correct value for your cosmological parameters, 
+# you'll need to multiply through by h^2
+# [where h = H / (100 km/s/Mpc)].  See the Overdensity field in
+# yt.data_objects.universal_fields.
+rho_crit_now = 1.8788e-29  # g/cm^3 (cosmological critical density)
+
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22
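
The new comment is easy to verify: the cosmological critical density is rho_crit = 3 H^2 / (8 pi G), and evaluating it in cgs for H = 100 km/s/Mpc reproduces the quoted constant. A quick check, taking 1 Mpc = 3.0857e24 cm:

    import math

    G = 6.67428e-8                  # cm^3 g^-1 s^-2, as defined above
    H = 100 * 1.0e5 / 3.0857e24     # 100 km/s/Mpc expressed in s^-1
    rho_crit = 3 * H**2 / (8 * math.pi * G)
    # rho_crit -> ~1.8788e-29 g/cm^3, matching rho_crit_now for h = 1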


https://bitbucket.org/yt_analysis/yt-3.0/commits/0f59ef0d7dc6/
Changeset:   0f59ef0d7dc6
Branch:      yt
User:        brittonsmith
Date:        2013-07-08 17:02:49
Summary:     Merged in ngoldbaum/yt (pull request #539)

Fixing a comment in physical_constants.py
Affected #:  1 file

diff -r 432d2a1883d262ad9ada8d98dd36fbe638fa45b1 -r 0f59ef0d7dc696a4c8fdab557eb544ca87942b6c yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,13 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
+# The following value was calculated assuming H = 100 km/s/Mpc.
+# To get the correct value for your cosmological parameters, 
+# you'll need to multiply through by h^2
+# [where h = H / (100 km/s/Mpc)].  See the Overdensity field in
+# yt.data_objects.universal_fields.
+rho_crit_now = 1.8788e-29  # g/cm^3 (cosmological critical density)
+
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22


https://bitbucket.org/yt_analysis/yt-3.0/commits/8df2a87c5098/
Changeset:   8df2a87c5098
Branch:      yt
User:        ngoldbaum
Date:        2013-07-08 22:42:17
Summary:     Fixing errors discovered via pyflakes.
Affected #:  1 file

diff -r 213d534593fb056814ce918c902e4fc3ea3360e9 -r 8df2a87c509837f8573fcffe776955614b2c2856 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -37,11 +37,9 @@
 from functools import wraps
 from numbers import Number
 
-from ._mpl_imports import \
-    FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+from ._mpl_imports import FigureCanvasAgg
 from .color_maps import yt_colormaps, is_colormap
-from .image_writer import \
-    write_image, apply_colormap
+from .image_writer import apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer, \
@@ -52,21 +50,20 @@
 from .base_plot_types import ImagePlotMPL
 
 from yt.utilities.delaunay.triangulate import Triangulation as triang
-from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
     fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
-    x_dict, x_names, \
-    y_dict, y_names, \
+    x_dict, y_dict, \
     axis_names, axis_labels, \
     formatted_length_unit_names
 from yt.utilities.math_utils import \
     ortho_find
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    GroupOwnership
-from yt.utilities.exceptions import YTUnitNotRecognized, YTInvalidWidthError
+from yt.utilities.exceptions import \
+     YTUnitNotRecognized, YTInvalidWidthError, YTCannotParseUnitDisplayName, \
+     YTNotInsideNotebook
+
 from yt.data_objects.time_series import \
     TimeSeriesData
 
@@ -539,12 +536,6 @@
             self.center = new_center
         self.set_window(self.bounds)
 
-    @property
-    def width(self):
-        Wx = self.xlim[1] - self.xlim[0]
-        Wy = self.ylim[1] - self.ylim[0]
-        return (Wx, Wy)
-
     @invalidate_data
     def set_antialias(self,aa):
         self.antialias = aa
@@ -839,10 +830,6 @@
         return xc, yc
 
     def _setup_plots(self):
-        if self._current_field is not None:
-            fields = [self._current_field]
-        else:
-            fields = self._frb.keys()
         self._colorbar_valid = True
         for f in self.fields:
             axis_index = self.data_source.axis
@@ -1087,7 +1074,6 @@
             # IPython v0.14+
             from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
-            canvas = FigureCanvasAgg(v.figure)
             display(v.figure)
 
     def show(self):
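
For context, pyflakes is a purely static checker: it parses each module and reports names that are bound but never used, which is exactly the dead code trimmed here (stale imports and never-read locals such as fields and canvas). In miniature, on a file containing:

    import os              # pyflakes: "'os' imported but unused"

    def _setup():
        canvas = object()  # pyflakes: "local variable 'canvas' is
        return True        #  assigned to but never used"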


https://bitbucket.org/yt_analysis/yt-3.0/commits/577bb3ce6fc3/
Changeset:   577bb3ce6fc3
Branch:      yt
User:        MatthewTurk
Date:        2013-07-10 15:25:02
Summary:     Merged in ngoldbaum/yt (pull request #549)

Fixing errors discovered via pyflakes.
Affected #:  1 file

diff -r 0f59ef0d7dc696a4c8fdab557eb544ca87942b6c -r 577bb3ce6fc3e6d71130be84d03f46201f8cac7b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -37,11 +37,9 @@
 from functools import wraps
 from numbers import Number
 
-from ._mpl_imports import \
-    FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS
+from ._mpl_imports import FigureCanvasAgg
 from .color_maps import yt_colormaps, is_colormap
-from .image_writer import \
-    write_image, apply_colormap
+from .image_writer import apply_colormap
 from .fixed_resolution import \
     FixedResolutionBuffer, \
     ObliqueFixedResolutionBuffer, \
@@ -52,21 +50,20 @@
 from .base_plot_types import ImagePlotMPL
 
 from yt.utilities.delaunay.triangulate import Triangulation as triang
-from yt.config import ytcfg
 from yt.funcs import \
     mylog, defaultdict, iterable, ensure_list, \
     fix_axis, get_image_suffix
 from yt.utilities.lib import write_png_to_string
 from yt.utilities.definitions import \
-    x_dict, x_names, \
-    y_dict, y_names, \
+    x_dict, y_dict, \
     axis_names, axis_labels, \
     formatted_length_unit_names
 from yt.utilities.math_utils import \
     ortho_find
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    GroupOwnership
-from yt.utilities.exceptions import YTUnitNotRecognized, YTInvalidWidthError
+from yt.utilities.exceptions import \
+     YTUnitNotRecognized, YTInvalidWidthError, YTCannotParseUnitDisplayName, \
+     YTNotInsideNotebook
+
 from yt.data_objects.time_series import \
     TimeSeriesData
 
@@ -539,12 +536,6 @@
             self.center = new_center
         self.set_window(self.bounds)
 
-    @property
-    def width(self):
-        Wx = self.xlim[1] - self.xlim[0]
-        Wy = self.ylim[1] - self.ylim[0]
-        return (Wx, Wy)
-
     @invalidate_data
     def set_antialias(self,aa):
         self.antialias = aa
@@ -839,10 +830,6 @@
         return xc, yc
 
     def _setup_plots(self):
-        if self._current_field is not None:
-            fields = [self._current_field]
-        else:
-            fields = self._frb.keys()
         self._colorbar_valid = True
         for f in self.fields:
             axis_index = self.data_source.axis
@@ -1087,7 +1074,6 @@
             # IPython v0.14+
             from IPython.core.display import display
         for k, v in sorted(self.plots.iteritems()):
-            canvas = FigureCanvasAgg(v.figure)
             display(v.figure)
 
     def show(self):


https://bitbucket.org/yt_analysis/yt-3.0/commits/75f379501f60/
Changeset:   75f379501f60
Branch:      yt
User:        ngoldbaum
Date:        2013-07-09 04:31:47
Summary:     This should make it so StreamStaticOutput.__init__ only runs once.
Affected #:  1 file

diff -r 213d534593fb056814ce918c902e4fc3ea3360e9 -r 75f379501f60899606b419886e4650afafdd0a3f yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,9 +55,11 @@
             mylog.debug("Registering: %s as %s", name, cls)
 
     def __new__(cls, filename=None, *args, **kwargs):
+        from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            if not isinstance(filename, StreamHandler):
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)
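
The mechanics behind the fix: when a class is called, Python invokes cls.__new__ and then, if the result is an instance of that class, automatically calls __init__ on it with the same arguments, so the explicit obj.__init__(...) inside __new__ ran the constructor twice. A minimal reproduction of the double call being guarded against:

    class Demo(object):
        def __new__(cls, arg=None):
            obj = object.__new__(cls)
            obj.__init__(arg)      # explicit call here ...
            return obj             # ... then Python calls __init__ again

        def __init__(self, arg=None):
            print("__init__ ran")

    Demo()   # prints "__init__ ran" twice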


https://bitbucket.org/yt_analysis/yt-3.0/commits/475d4521d1ab/
Changeset:   475d4521d1ab
Branch:      yt
User:        ngoldbaum
Date:        2013-07-09 04:38:15
Summary:     Merged yt_analysis/yt into yt
Affected #:  4 files

diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -3703,7 +3703,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones

diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/fortran_utils.py
--- /dev/null
+++ b/yt/utilities/fortran_utils.py
@@ -0,0 +1,243 @@
+"""
+Utilities for reading Fortran files.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2012 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import struct
+import numpy as np
+import os
+
+def read_attrs(f, attrs,endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    according to a definition of attributes, returning a dictionary.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.  Note that this
+    function is used for reading sequentially-written records.  If you have
+    many values that were written simultaneously, see read_record.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    attrs : iterable of iterables
+        This object should be an iterable of one of the formats: 
+        [ (attr_name, count, struct type), ... ].
+        [ ((name1,name2,name3),count, vector type]
+        [ ((name1,name2,name3),count, 'type type type']
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_attrs(f, header)
+    """
+    vv = {}
+    net_format = endian
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        net_format += "".join(["I"] + ([t] * n) + ["I"])
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vv = {}
+    for a, n, t in attrs:
+        for end in '@=<>':
+            t = t.replace(end,'')
+        if type(a)==tuple:
+            n = len(a)
+        s1 = vals.pop(0)
+        v = [vals.pop(0) for i in range(n)]
+        s2 = vals.pop(0)
+        if s1 != s2:
+            size = struct.calcsize(endian + "I" + "".join(n*[t]) + "I")
+        assert(s1 == s2)
+        if n == 1: v = v[0]
+        if type(a)==tuple:
+            assert len(a) == len(v)
+            for k,val in zip(a,v):
+                vv[k]=val
+        else:
+            vv[a] = v
+    return vv
+
+def read_vector(f, d, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a vector of values.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    d : data type
+        This is the datatype (from the struct module) that we should read.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    tr : numpy.ndarray
+        This is the vector of values read from the file.
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_vector(f, 'd')
+    """
+    pad_fmt = "%sI" % (endian)
+    pad_size = struct.calcsize(pad_fmt)
+    vec_len = struct.unpack(pad_fmt,f.read(pad_size))[0] # bytes
+    vec_fmt = "%s%s" % (endian, d)
+    vec_size = struct.calcsize(vec_fmt)
+    if vec_len % vec_size != 0:
+        print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
+        raise RuntimeError
+    vec_num = vec_len / vec_size
+    if isinstance(f, file): # Needs to be explicitly a file
+        tr = np.fromfile(f, vec_fmt, count=vec_num)
+    else:
+        tr = np.fromstring(f.read(vec_len), vec_fmt, count=vec_num)
+    vec_len2 = struct.unpack(pad_fmt,f.read(pad_size))[0]
+    assert(vec_len == vec_len2)
+    return tr
+
+def skip(f, n=1, endian='='):
+    r"""This function accepts a file pointer and skips a Fortran unformatted
+    record. Optionally check that the skip was done correctly by checking 
+    the pad bytes.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    n : int
+        Number of records to skip.
+    check : bool
+        Assert that the pad bytes are equal
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    skipped: The number of elements in the skipped array
+
+    Examples
+    --------
+
+    >>> f = open("fort.3", "rb")
+    >>> skip(f, 3)
+    """
+    skipped = 0
+    pos = f.tell()
+    for i in range(n):
+        fmt = endian+"I"
+        size = f.read(struct.calcsize(fmt))
+        s1= struct.unpack(fmt, size)[0]
+        f.seek(s1+ struct.calcsize(fmt), os.SEEK_CUR)
+        s2= struct.unpack(fmt, size)[0]
+        assert s1==s2 
+        skipped += s1/struct.calcsize(fmt)
+    return skipped
+
+def peek_record_size(f,endian='='):
+    r""" This function accept the file handle and returns
+    the size of the next record and then rewinds the file
+    to the previous position.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    Number of bytes in the next record
+    """
+    pos = f.tell()
+    s = struct.unpack('>i', f.read(struct.calcsize('>i')))
+    f.seek(pos)
+    return s[0]
+
+def read_record(f, rspec, endian='='):
+    r"""This function accepts a file pointer and reads from that file pointer
+    a single "record" with different components.
+
+    Fortran unformatted files provide total bytesize at the beginning and end
+    of a record.  By correlating the components of that record with attribute
+    names, we construct a dictionary that gets returned.
+
+    Parameters
+    ----------
+    f : File object
+        An open file object.  Should have been opened in mode rb.
+    rspec : iterable of iterables
+        This object should be an iterable of the format [ (attr_name, count,
+        struct type), ... ].
+    endian : str
+        '=' is native, '>' is big, '<' is little endian
+
+    Returns
+    -------
+    values : dict
+        This will return a dict of iterables of the components of the values in
+        the file.
+
+    Examples
+    --------
+
+    >>> header = [ ("ncpu", 1, "i"), ("nfiles", 2, "i") ]
+    >>> f = open("fort.3", "rb")
+    >>> rv = read_record(f, header)
+    """
+    vv = {}
+    net_format = endian + "I"
+    for a, n, t in rspec:
+        t = t if len(t)==1 else t[-1]
+        net_format += "%s%s"%(n, t)
+    net_format += "I"
+    size = struct.calcsize(net_format)
+    vals = list(struct.unpack(net_format, f.read(size)))
+    vvv = vals[:]
+    s1, s2 = vals.pop(0), vals.pop(-1)
+    if s1 != s2:
+        print "S1 = %s ; S2 = %s ; SIZE = %s"
+        raise RuntimeError
+    pos = 0
+    for a, n, t in rspec:
+        vv[a] = vals[pos:pos+n]
+        pos += n
+    return vv
+

diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -26,7 +26,13 @@
 gravitational_constant_cgs  = 6.67428e-8  # cm^3 g^-1 s^-2
 planck_constant_cgs   = 6.62606896e-27  # erg s
 stefan_boltzmann_constant_cgs = 5.67051e-5 # erg cm^-2 s^-1 K^-4
-rho_crit_now = 1.8788e-29  # g times h^2 (critical mass for closure, Cosmology)
+# The following value was calculated assuming H = 100 km/s/Mpc.
+# To get the correct value for your cosmological parameters, 
+# you'll need to multiply through by h^2
+# [where h = H / (100 km/s/Mpc)].  See the Overdensity field in
+# yt.data_objects.universal_fields.
+rho_crit_now = 1.8788e-29  # g/cm^3 (cosmological critical density)
+
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22

diff -r 75f379501f60899606b419886e4650afafdd0a3f -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -3,6 +3,7 @@
 import sys
 import os.path
 import glob
+import platform
 
 
 # snatched from PyTables
@@ -23,6 +24,8 @@
 # snatched from PyTables
 def get_default_dirs():
     default_header_dirs = []
+    default_library_dirs = []
+
     add_from_path("CPATH", default_header_dirs)
     add_from_path("C_INCLUDE_PATH", default_header_dirs)
     add_from_flags("CPPFLAGS", "-I", default_header_dirs)
@@ -30,12 +33,21 @@
         ['/usr/include', '/usr/local/include', '/usr/X11']
     )
 
-    default_library_dirs = []
+    _archs = ['lib64', 'lib']
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            _archs.extend(
+                ['lib/x86_64-linux-gnu',
+                 'lib/i686-linux-gnu',
+                 'lib/i386-linux-gnu']
+            )
+
     add_from_flags("LDFLAGS", "-L", default_library_dirs)
     default_library_dirs.extend(
         os.path.join(_tree, _arch)
-        for _tree in ('/', '/usr', '/usr/local', '/usr/X11')
-        for _arch in ('lib64', 'lib')
+        for _tree in ('/usr', '/usr/local', '/usr/X11', '/')
+        for _arch in _archs
     )
     return default_header_dirs, default_library_dirs
 
@@ -59,6 +71,14 @@
 
 
 def check_prefix(inc_dir, lib_dir):
+    if platform.system() == 'Linux':
+        distname, version, did = platform.linux_distribution()
+        if distname in ('Ubuntu', 'Debian'):
+            print("Since you are using multiarch distro it's hard to detect")
+            print("whether library matches the header file. We will assume")
+            print("it does. If you encounter any build failures please use")
+            print("proper cfg files to provide path to the dependencies")
+            return (inc_dir, lib_dir)
     prefix = os.path.commonprefix([inc_dir, lib_dir]).rstrip('/\\')
     if prefix != '' and prefix == os.path.dirname(inc_dir):
         return (inc_dir, lib_dir)
@@ -69,20 +89,29 @@
 
 
 def get_location_from_ctypes(header, library):
+    yt_inst = os.environ.get('YT_DEST')
+    if yt_inst is not None:
+        # since we prefer installation via the install script, make sure
+        # that the YT_DEST path takes precedence above all else
+        return (os.path.join(yt_inst, 'include'), os.path.join(yt_inst, 'lib'))
+
     try:
         import ctypes
         import ctypes.util
     except ImportError:
         return (None, None)
 
+    target_inc, target_libdir = None, None
     default_header_dirs, default_library_dirs = get_default_dirs()
-    target_inc, target_libdir = None, None
     for inc_prefix in default_header_dirs:
         if os.path.isfile(os.path.join(inc_prefix, header)):
             target_inc = inc_prefix
 
     target_libfile = ctypes.util.find_library(library)
-    if target_libfile is not None and os.path.isfile(target_libfile):
+    if None in (target_inc, target_libfile):
+        # either header or lib was not found, abort now
+        return (None, None)
+    if os.path.isfile(target_libfile):
         return check_prefix(target_inc, os.path.dirname(target_libfile))
     for lib_dir in default_library_dirs:
         try:
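For context, the ctypes probe used in get_location_from_ctypes above resolves a library by its short name and returns None when it cannot; for example:

    >>> import ctypes.util
    >>> ctypes.util.find_library("m")   # e.g. 'libm.so.6' on Linux; None if not found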


https://bitbucket.org/yt_analysis/yt-3.0/commits/0203a30a0271/
Changeset:   0203a30a0271
Branch:      yt
User:        ngoldbaum
Date:        2013-07-10 19:02:12
Summary:     Duck typing to avoid an import.
Affected #:  1 file

diff -r 475d4521d1ab9fe8a3c708c0d1e1b03d83825c88 -r 0203a30a0271d5714ea7c02f9766341f9b2946dc yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -55,10 +55,13 @@
             mylog.debug("Registering: %s as %s", name, cls)
 
     def __new__(cls, filename=None, *args, **kwargs):
-        from yt.frontends.stream.data_structures import StreamHandler
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            if not isinstance(filename, StreamHandler):
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
                 obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
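The attribute probe above is plain duck typing; a minimal sketch of the pattern (the class here is hypothetical, not yt's StreamHandler):

    class FakeHandler(object):
        def get_fields(self): return []
        def get_particle_type(self, name): return False

    handler = FakeHandler()
    is_stream = (hasattr(handler, 'get_fields') and
                 hasattr(handler, 'get_particle_type'))
    assert is_stream   # quacks like a StreamHandler, with no import needed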


https://bitbucket.org/yt_analysis/yt-3.0/commits/a745781d8c7d/
Changeset:   a745781d8c7d
Branch:      yt
User:        MatthewTurk
Date:        2013-07-10 19:04:54
Summary:     Merged in ngoldbaum/yt (pull request #550)

Fixing a bug that causes StaticOutput.__init__() to run twice for Stream datasets
Affected #:  1 file

diff -r 577bb3ce6fc3e6d71130be84d03f46201f8cac7b -r a745781d8c7dde89973c1e8cac7da07aa564036e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -57,7 +57,12 @@
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)


https://bitbucket.org/yt_analysis/yt-3.0/commits/9b584c637f09/
Changeset:   9b584c637f09
Branch:      yt
User:        samskillman
Date:        2013-07-10 18:18:40
Summary:     Fix alpha blending in line drawing for grey_opacity=True.  This now correctly
blends and renormalizes the RGB channels based on the alpha channel.  Before,
the color of the lines was completely overpowering the underlying rendering.
Affected #:  1 file

diff -r 577bb3ce6fc3e6d71130be84d03f46201f8cac7b -r 9b584c637f0906e6bce1a52dca7c439495a75235 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -127,7 +127,7 @@
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
     cdef int nl = xs.shape[0]
-    cdef np.float64_t alpha[4]
+    cdef np.float64_t alpha[4], outa
     cdef int i, j
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
@@ -158,17 +158,22 @@
             elif (x0 >= nx-thick+1 and sx == 1): break
             elif (y0 < thick and sy == -1): break
             elif (y0 >= ny-thick+1 and sy == 1): break
-            if (x0 >=thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick):
-                if has_alpha:
-                    for i in range(4):
-                        image[x0-thick/2:x0+(1+thick)/2, 
-                              y0-thick/2:y0+(1+thick)/2,i] = \
-                                (1.-alpha[3])*image[x0,y0,i] + alpha[i]
-                else:
-                    for i in range(3):
-                        image[x0-thick/2:x0+(1+thick)/2, 
-                              y0-thick/2:y0+(1+thick)/2,i] = \
-                                (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+            if (x0 < thick or x0 >= nx-thick or y0 < thick or y0 >= ny-thick):
+                break
+            for xi in range(x0-thick/2, x0+(1+thick)/2):
+                for yi in range(y0-thick/2, y0+(1+thick)/2):
+                    if has_alpha:
+                        image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3]) 
+                        if outa != 0.0:
+                            outa = 1.0/outa
+                        for i in range(3):
+                            image[xi, yi, i] = \
+                                    ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3] 
+                                     + alpha[3]*alpha[i])*outa
+                    else:
+                        for i in range(3):
+                            image[xi, yi, i] = \
+                                    (1.-alpha[i])*image[xi,yi,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break
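The renormalization above is aiming at Porter-Duff "over" compositing with straight (non-premultiplied) alpha; a sketch of that arithmetic on a single pixel (the helper name is mine, not yt's):

    def over(src_rgb, src_a, dst_rgb, dst_a):
        # composite src over dst; returns straight-alpha RGB and coverage
        out_a = src_a + dst_a * (1.0 - src_a)
        if out_a == 0.0:
            return (0.0, 0.0, 0.0), 0.0
        out_rgb = tuple((src_a * s + (1.0 - src_a) * dst_a * d) / out_a
                        for s, d in zip(src_rgb, dst_rgb))
        return out_rgb, out_a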


https://bitbucket.org/yt_analysis/yt-3.0/commits/ad11ffa1a7b5/
Changeset:   ad11ffa1a7b5
Branch:      yt
User:        samskillman
Date:        2013-07-10 18:48:37
Summary:     Can't just break out, or else lines that begin outside of the image will never be drawn.
Affected #:  1 file

diff -r 9b584c637f0906e6bce1a52dca7c439495a75235 -r ad11ffa1a7b5b7665f8e3269e1b3d1e875c04743 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -158,22 +158,21 @@
             elif (x0 >= nx-thick+1 and sx == 1): break
             elif (y0 < thick and sy == -1): break
             elif (y0 >= ny-thick+1 and sy == 1): break
-            if (x0 < thick or x0 >= nx-thick or y0 < thick or y0 >= ny-thick):
-                break
-            for xi in range(x0-thick/2, x0+(1+thick)/2):
-                for yi in range(y0-thick/2, y0+(1+thick)/2):
-                    if has_alpha:
-                        image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3]) 
-                        if outa != 0.0:
-                            outa = 1.0/outa
-                        for i in range(3):
-                            image[xi, yi, i] = \
-                                    ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3] 
-                                     + alpha[3]*alpha[i])*outa
-                    else:
-                        for i in range(3):
-                            image[xi, yi, i] = \
-                                    (1.-alpha[i])*image[xi,yi,i] + alpha[i]
+            if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
+                for xi in range(x0-thick/2, x0+(1+thick)/2):
+                    for yi in range(y0-thick/2, y0+(1+thick)/2):
+                        if has_alpha:
+                            image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
+                            if outa != 0.0:
+                                outa = 1.0/outa
+                            for i in range(3):
+                                image[xi, yi, i] = \
+                                        ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3]
+                                         + alpha[3]*alpha[i])*outa
+                        else:
+                            for i in range(3):
+                                image[xi, yi, i] = \
+                                        (1.-alpha[i])*image[xi,yi,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break


https://bitbucket.org/yt_analysis/yt-3.0/commits/f1da09f71bbb/
Changeset:   f1da09f71bbb
Branch:      yt
User:        MatthewTurk
Date:        2013-07-11 14:41:56
Summary:     Merged in samskillman/yt (pull request #553)

Fix for grey_opacity grid line drawing
Affected #:  1 file

diff -r a745781d8c7dde89973c1e8cac7da07aa564036e -r f1da09f71bbb45215639285c3071695b55e2e6bf yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -127,7 +127,7 @@
     cdef int nx = image.shape[0]
     cdef int ny = image.shape[1]
     cdef int nl = xs.shape[0]
-    cdef np.float64_t alpha[4]
+    cdef np.float64_t alpha[4], outa
     cdef int i, j
     cdef int dx, dy, sx, sy, e2, err
     cdef np.int64_t x0, x1, y0, y1
@@ -158,17 +158,21 @@
             elif (x0 >= nx-thick+1 and sx == 1): break
             elif (y0 < thick and sy == -1): break
             elif (y0 >= ny-thick+1 and sy == 1): break
-            if (x0 >=thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick):
-                if has_alpha:
-                    for i in range(4):
-                        image[x0-thick/2:x0+(1+thick)/2, 
-                              y0-thick/2:y0+(1+thick)/2,i] = \
-                                (1.-alpha[3])*image[x0,y0,i] + alpha[i]
-                else:
-                    for i in range(3):
-                        image[x0-thick/2:x0+(1+thick)/2, 
-                              y0-thick/2:y0+(1+thick)/2,i] = \
-                                (1.-alpha[i])*image[x0,y0,i] + alpha[i]
+            if x0 >= thick and x0 < nx-thick and y0 >= thick and y0 < ny-thick:
+                for xi in range(x0-thick/2, x0+(1+thick)/2):
+                    for yi in range(y0-thick/2, y0+(1+thick)/2):
+                        if has_alpha:
+                            image[xi, yi, 3] = outa = alpha[3] + image[xi, yi, 3]*(1-alpha[3])
+                            if outa != 0.0:
+                                outa = 1.0/outa
+                            for i in range(3):
+                                image[xi, yi, i] = \
+                                        ((1.-alpha[3])*image[xi, yi, i]*image[xi, yi, 3]
+                                         + alpha[3]*alpha[i])*outa
+                        else:
+                            for i in range(3):
+                                image[xi, yi, i] = \
+                                        (1.-alpha[i])*image[xi,yi,i] + alpha[i]
 
             if (x0 == x1 and y0 == y1):
                 break


https://bitbucket.org/yt_analysis/yt-3.0/commits/4e6c9e9f4eea/
Changeset:   4e6c9e9f4eea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-07-12 03:48:07
Summary:     Merging from yt development branch
Affected #:  34 files

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
 import subprocess
 import shutil
 import glob
-import distribute_setup
-distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+    import distribute_setup
+    distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
 # End snippet
 ######
 
-import setuptools
-
 VERSION = "3.0dev"
 
 if os.path.exists('MANIFEST'):
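Note that StrictVersion compares release numbers numerically rather than lexically, which is what makes the guard above reliable: StrictVersion('0.6.49') < StrictVersion('0.7.0') is True, even though '0.6.49' > '0.7.0' as plain strings.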

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
 """
 
 __version__ = "3.0-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys
+    from yt.config import ytcfg
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+    ytcfg["yt","suppressStreamLogging"] = 'True'
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        ytcfg["yt","suppressStreamLogging"] = log_suppress
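A minimal usage sketch for the new entry point (arguments as defined above):

    >>> import yt
    >>> yt.run_nose(verbose=True)            # unit tests only
    >>> yt.run_nose(run_answer_tests=True)   # also run the answer tests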

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
         self.layers.append(base_layer)
         self.cell_count += np.product(pf.domain_dimensions)
 
-        for grid in pf.h.grids:
+        sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+        for grid in sorted_grids:
             if grid.Level <= self.max_level:
                 self._add_grid_to_layers(grid)
 
@@ -232,11 +233,11 @@
             if p == 0:
                 ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
             else:
-                LE = np.zeros(3)
+                parent_LE = np.zeros(3)
                 for potential_parent in self.layers:
                     if potential_parent.id == p:
-                        LE = potential_parent.LeftEdge
-                ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+                        parent_LE = potential_parent.LeftEdge
+                ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
             ix  = int(ind[0]+0.5)
             iy  = int(ind[1]+0.5)
             iz  = int(ind[2]+0.5)

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -409,7 +409,8 @@
         self.left_edge = np.array(left_edge)
         self.level = level
         rdx = self.pf.domain_dimensions*self.pf.refine_by**level
-        self.dds = self.pf.domain_width/rdx.astype("float64")
+        rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
+        self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
         self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
         self._num_ghost_zones = num_ghost_zones

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1180,6 +1180,7 @@
 
 class YTValueCutExtractionBase(YTSelectionContainer3D):
     _type_name = "cut_region"
+    _con_args = ("_base_region", "_field_cuts")
     """
     In-line extracted regions accept a base region and a set of field_cuts to
     determine which points in a grid should be included.

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
+    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -75,7 +75,12 @@
     def __new__(cls, filename=None, *args, **kwargs):
         if not isinstance(filename, types.StringTypes):
             obj = object.__new__(cls)
-            obj.__init__(filename, *args, **kwargs)
+            # The Stream frontend uses a StreamHandler object to pass metadata
+            # to __init__.
+            is_stream = (hasattr(filename, 'get_fields') and
+                         hasattr(filename, 'get_particle_type'))
+            if not is_stream:
+                obj.__init__(filename, *args, **kwargs)
             return obj
         apath = os.path.abspath(filename)
         if not os.path.exists(apath): raise IOError(filename)

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1087,7 +1087,7 @@
 
     return get_sph_r_component(Bfields, theta, phi, normal)
 
-add_field("BRadial", function=_BPoloidal,
+add_field("BRadial", function=_BRadial,
           units=r"\rm{Gauss}",
           validators=[ValidateParameter("normal")])
 
@@ -1420,7 +1420,7 @@
     domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
     domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
     domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
-    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+    return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
 add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
           validators=[ValidateSpatial(1, 
                       ["x-velocity", "y-velocity", "z-velocity"])],

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -608,7 +608,7 @@
         self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
         self.dimensionality = self.parameters["TopGridRank"]
         self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = self.parameters["TopGridDimensions"]
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
         self.refine_by = self.parameters.get("RefineBy", 2)
 
         if (self.parameters.has_key("ComovingCoordinates") and

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -465,7 +465,7 @@
         try: 
             self.parameters["usecosmology"]
             self.cosmological_simulation = 1
-            self.current_redshift = self.parameters['redshift']
+            self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
             self.omega_lambda = self.parameters['cosmologicalconstant']
             self.omega_matter = self.parameters['omegamatter']
             self.hubble_constant = self.parameters['hubbleconstant']
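As a worked check of the new expression: a snapshot written at scale factor a = 0.25 now reports z = 1/a - 1 = 3, rather than whatever stale 'redshift' parameter happened to be stored in the file.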

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -76,8 +76,9 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        if self.pf.data_software != "piernik":
+            if self.pf.dimensionality < 2: self.dds[1] = 1.0
+            if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     @property
@@ -235,6 +236,11 @@
 
     def _parse_parameter_file(self):
         self._handle = h5py.File(self.parameter_filename, "r")
+        if 'data_software' in self._handle['gridded_data_format'].attrs:
+            self.data_software = \
+                self._handle['gridded_data_format'].attrs['data_software']
+        else:
+            self.data_software = "unknown"
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -24,4 +24,9 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("tiger")
+    config.add_subpackage("flash/tests")
+    config.add_subpackage("enzo/tests")
+    config.add_subpackage("orion/tests")
+    config.add_subpackage("stream/tests")
+    config.add_subpackage("chombo/tests")
     return config

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -98,7 +98,17 @@
         if param == "loglevel": # special case
             mylog.setLevel(int(val))
 
-parser = argparse.ArgumentParser(description = 'yt command line arguments')
+class YTParser(argparse.ArgumentParser):
+    def error(self, message):
+        """error(message: string)
+
+        Prints a help message that is more detailed than the argparse default
+        and then exits.
+        """
+        self.print_help(sys.stderr)
+        self.exit(2, '%s: error: %s\n' % (self.prog, message))
+
+parser = YTParser(description = 'yt command line arguments')
 parser.add_argument("--config", action=SetConfigOption,
     help = "Set configuration option, in the form param=value")
 parser.add_argument("--paste", action=SetExceptionHandling,

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
 """
-AMR kD-Tree Tools 
+AMR kD-Tree Tools
 
 Authors: Samuel Skillman <samskillman at gmail.com>
 Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
 """
 import numpy as np
 from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
-    def __init__(self, parent, left, right,
-            left_edge, right_edge, grid_id, node_id):
-        self.left = left
-        self.right = right
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.grid = grid_id
-        self.parent = parent
-        self.id = node_id
-        self.data = None
-        self.split = None
-
-class Split(object):
-    def __init__(self, dim, pos):
-        self.dim = dim
-        self.pos = pos
-
-def should_i_build(node, rank, size):
-    if (node.id < size) or (node.id >= 2*size):
-        return True
-    elif node.id - size == rank:
-        return True
-    else:
-        return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grid(node, gle, gre, gid, rank, size)
-    else:
-        less_id = gle[node.split.dim] < node.split.pos
-        if less_id:
-            add_grid(node.left, gle, gre,
-                     gid, rank, size)
-
-        greater_id = gre[node.split.dim] > node.split.pos
-        if greater_id:
-            add_grid(node.right, gle, gre,
-                     gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    # If we should continue to split based on parallelism, do so!
-    if should_i_split(node, rank, size):
-        geo_split(node, gle, gre, grid_id, rank, size)
-        return
-
-    if np.all(gle <= node.left_edge) and \
-            np.all(gre >= node.right_edge):
-        node.grid = grid_id
-        assert(node.grid is not None)
-        return
-
-    # Split the grid
-    check = split_grid(node, gle, gre, grid_id, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
-    if not should_i_build(node, rank, size):
-        return
-
-    if kd_is_leaf(node):
-        insert_grids(node, gles, gres, gids, rank, size)
-    else:
-        less_ids = gles[:,node.split.dim] < node.split.pos
-        if len(less_ids) > 0:
-            add_grids(node.left, gles[less_ids], gres[less_ids],
-                      gids[less_ids], rank, size)
-
-        greater_ids = gres[:,node.split.dim] > node.split.pos
-        if len(greater_ids) > 0:
-            add_grids(node.right, gles[greater_ids], gres[greater_ids],
-                      gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
-    return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
-    big_dim = np.argmax(gre-gle)
-    new_pos = (gre[big_dim] + gle[big_dim])/2.
-    old_gre = gre.copy()
-    new_gle = gle.copy()
-    new_gle[big_dim] = new_pos
-    gre[big_dim] = new_pos
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grid(node.left, gle, gre,
-                grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grid(node.right, new_gle, old_gre,
-                grid_id, rank, size)
-    return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
-    big_dim = np.argmax(gres[0]-gles[0])
-    new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
-    old_gre = gres[0].copy()
-    new_gle = gles[0].copy()
-    new_gle[big_dim] = new_pos
-    gres[0][big_dim] = new_pos
-    gles = np.append(gles, np.array([new_gle]), axis=0)
-    gres = np.append(gres, np.array([old_gre]), axis=0)
-    grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
-    split = Split(big_dim, new_pos)
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[:1], gres[:1],
-            grid_ids[:1], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[1:], gres[1:],
-            grid_ids[1:], rank, size)
-    return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
-    if not should_i_build(node, rank, size) or grid_ids.size == 0:
-        return
-
-    if len(grid_ids) == 1:
-        # If we should continue to split based on parallelism, do so!
-        if should_i_split(node, rank, size):
-            geo_split(node, gles, gres, grid_ids, rank, size)
-            return
-
-        if np.all(gles[0] <= node.left_edge) and \
-                np.all(gres[0] >= node.right_edge):
-            node.grid = grid_ids[0]
-            assert(node.grid is not None)
-            return
-
-    # Split the grids
-    check = split_grids(node, gles, gres, grid_ids, rank, size)
-    # If check is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if check == -1:
-        node.grid = None
-    return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
-    # Find a Split
-    data = np.array([(gle[:], gre[:])],  copy=False)
-    best_dim, split_pos, less_id, greater_id = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    if less_id:
-        insert_grid(node.left, gle, gre,
-                     grid_id, rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    if greater_id:
-        insert_grid(node.right, gle, gre,
-                     grid_id, rank, size)
-
-    return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
-    # Find a Split
-    data = np.array([(gles[i,:], gres[i,:]) for i in
-        xrange(grid_ids.shape[0])], copy=False)
-    best_dim, split_pos, less_ids, greater_ids = \
-        kdtree_get_choices(data, node.left_edge, node.right_edge)
-
-    # If best_dim is -1, then we have found a place where there are no choices.
-    # Exit out and set the node to None.
-    if best_dim == -1:
-        return -1
-
-    split = Split(best_dim, split_pos)
-
-    del data, best_dim, split_pos
-
-    # Create a Split
-    divide(node, split)
-
-    # Populate Left Node
-    #print 'Inserting left node', node.left_edge, node.right_edge
-    insert_grids(node.left, gles[less_ids], gres[less_ids],
-                 grid_ids[less_ids], rank, size)
-
-    # Populate Right Node
-    #print 'Inserting right node', node.left_edge, node.right_edge
-    insert_grids(node.right, gles[greater_ids], gres[greater_ids],
-                 grid_ids[greater_ids], rank, size)
-
-    return
-
-def new_right(Node, split):
-    new_right = Node.right_edge.copy()
-    new_right[split.dim] = split.pos
-    return new_right
-
-def new_left(Node, split):
-    new_left = Node.left_edge.copy()
-    new_left[split.dim] = split.pos
-    return new_left
-
-def divide(node, split):
-    # Create a Split
-    node.split = split
-    node.left = Node(node, None, None,
-            node.left_edge, new_right(node, split), node.grid,
-                     _lchild_id(node.id))
-    node.right = Node(node, None, None,
-            new_left(node, split), node.right_edge, node.grid,
-                      _rchild_id(node.id))
-    return
-
-def kd_sum_volume(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
-    if (node.left is None) and (node.right is None):
-        if node.grid is None:
-            return 0.0
-        return np.prod(node.right_edge - node.left_edge)
-    else:
-        return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
-    assert (node.left is None) == (node.right is None)
-    if (node.left is None) and (node.right is None):
-        if node.grid is not None:
-            return np.prod(node.right_edge - node.left_edge)
-        else: return 0.0
-    else:
-        return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
-    no_l_child = node.left is None
-    no_r_child = node.right is None
-    assert no_l_child == no_r_child
-    return no_l_child
-
-def step_depth(current, previous):
-    '''
-    Takes a single step in the depth-first traversal
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down, go left first
-        previous = current
-        if current.left is not None:
-            current = current.left
-        elif current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left, go right 
-        previous = current
-        if current.right is not None:
-            current = current.right
-        else:
-            current = current.parent
-
-    elif current.right is previous: # Moving up from right child, move up
-        previous = current
-        current = current.parent
-
-    return current, previous
-
-def depth_traverse(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def depth_first_touch(tree, max_node=None):
-    '''
-    Yields a depth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    if max_node is None:
-        max_node = np.inf
-    while current is not None:
-        if previous is None or previous.parent != current:
-            yield current
-        current, previous = step_depth(current, previous)
-        if current is None: break
-        if current.id >= max_node:
-            current = current.parent
-            previous = current.right
-
-def breadth_traverse(tree):
-    '''
-    Yields a breadth-first traversal of the kd tree always going to
-    the left child before the right.
-    '''
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
-    '''
-    Yields a viewpoint dependent traversal of the kd-tree.  Starts
-    with nodes furthest away from viewpoint.
-    '''
-
-    current = tree.trunk
-    previous = None
-    while current is not None:
-        yield current
-        current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
-    '''
-    Takes a single step in the viewpoint based traversal.  Always
-    goes to the node furthest away from viewpoint first.
-    '''
-    if kd_is_leaf(current): # At a leaf, move back up
-        previous = current
-        current = current.parent
-    elif current.split.dim is None: # This is a dead node
-        previous = current
-        current = current.parent
-
-    elif current.parent is previous: # Moving down
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                previous = current.right
-        else:
-            if current.left is not None:
-                current = current.left
-            else:
-                previous = current.left
-
-    elif current.right is previous: # Moving up from right 
-        previous = current
-        if viewpoint[current.split.dim] <= current.split.pos:
-            if current.left is not None:
-                current = current.left
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    elif current.left is previous: # Moving up from left child
-        previous = current
-        if viewpoint[current.split.dim] > current.split.pos:
-            if current.right is not None:
-                current = current.right
-            else:
-                current = current.parent
-        else:
-            current = current.parent
-
-    return current, previous
 
 
 def receive_and_reduce(comm, incoming_rank, image, add_to_front):
-    mylog.debug( 'Receiving image from %04i' % incoming_rank)
+    mylog.debug('Receiving image from %04i' % incoming_rank)
     #mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
     arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
         (image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
         np.add(image, front, image)
         return image
 
-    ta = 1.0 - front[:,:,3]
+    ta = 1.0 - front[:, :, 3]
     np.maximum(ta, 0.0, ta)
     # This now does the following calculation, but in a memory
     # conservative fashion
     # image[:,:,i  ] = front[:,:,i] + ta*back[:,:,i]
     image = back.copy()
     for i in range(4):
-        np.multiply(image[:,:,i], ta, image[:,:,i])
+        np.multiply(image[:, :, i], ta, image[:, :, i])
     np.add(image, front, image)
     return image
 
+
 def send_to_parent(comm, outgoing_rank, image):
-    mylog.debug( 'Sending image to %04i' % outgoing_rank)
+    mylog.debug('Sending image to %04i' % outgoing_rank)
     comm.send_array(image, outgoing_rank, tag=comm.rank)
 
+
 def scatter_image(comm, root, image):
-    mylog.debug( 'Scattering from %04i' % root)
+    mylog.debug('Scattering from %04i' % root)
     image = comm.mpi_bcast(image, root=root)
     return image
-
-def find_node(node, pos):
-    """
-    Find the AMRKDTree node enclosing a position
-    """
-    assert(np.all(node.left_edge <= pos))
-    assert(np.all(node.right_edge > pos))
-    while not kd_is_leaf(node):
-        if pos[node.split.dim] < node.split.pos:
-            node = node.left
-        else:
-            node = node.right
-    return node
-

diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -26,10 +26,13 @@
 from yt.funcs import *
 import numpy as np
 import h5py
-from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
-        depth_traverse, viewpoint_traverse, add_grids, \
-        receive_and_reduce, send_to_parent, scatter_image, find_node, \
-        depth_first_touch
+from amr_kdtools import \
+        receive_and_reduce, send_to_parent, scatter_image
+
+from yt.utilities.lib.amr_kdtools import Node, add_pygrids, find_node, \
+        kd_is_leaf, depth_traverse, depth_first_touch, viewpoint_traverse, \
+        kd_traverse, \
+        get_left_edge, get_right_edge, kd_sum_volume, kd_node_check
 from yt.utilities.parallel_tools.parallel_analysis_interface \
     import ParallelAnalysisInterface 
 from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -49,82 +52,85 @@
                   [ 1,  0, -1], [ 1,  0,  0], [ 1,  0,  1],
                   [ 1,  1, -1], [ 1,  1,  0], [ 1,  1,  1] ])
 
-
-def make_vcd(data, log=False):
-    new_field = np.zeros(np.array(data.shape) + 1, dtype='float64')
-    of = data
-    new_field[:-1, :-1, :-1] += of
-    new_field[:-1, :-1, 1:] += of
-    new_field[:-1, 1:, :-1] += of
-    new_field[:-1, 1:, 1:] += of
-    new_field[1:, :-1, :-1] += of
-    new_field[1:, :-1, 1:] += of
-    new_field[1:, 1:, :-1] += of
-    new_field[1:, 1:, 1:] += of
-    np.multiply(new_field, 0.125, new_field)
-    if log:
-        new_field = np.log10(new_field)
-
-    new_field[:, :, -1] = 2.0*new_field[:, :, -2] - new_field[:, :, -3]
-    new_field[:, :, 0] = 2.0*new_field[:, :, 1] - new_field[:, :, 2]
-    new_field[:, -1, :] = 2.0*new_field[:, -2, :] - new_field[:, -3, :]
-    new_field[:, 0, :] = 2.0*new_field[:, 1, :] - new_field[:, 2, :]
-    new_field[-1, :, :] = 2.0*new_field[-2, :, :] - new_field[-3, :, :]
-    new_field[0, :, :] = 2.0*new_field[1, :, :] - new_field[2, :, :]
-
-    if log: 
-        np.power(10.0, new_field, new_field)
-    return new_field
-
 class Tree(object):
-    def __init__(self, pf, comm_rank=0, comm_size=1,
-            min_level=None, max_level=None, data_source=None):
+    def __init__(self, pf, comm_rank=0, comm_size=1, left=None, right=None,
+            min_level=None, max_level=None, grids=None):
         
         self.pf = pf
-        if data_source is None:
-            data_source = pf.h.all_data()
-        self.data_source = data_source
         self._id_offset = self.pf.h.grids[0]._id_offset
+        if left is None:
+            left = np.array([-np.inf]*3)
+        if right is None:
+            right = np.array([np.inf]*3)
+
         if min_level is None: min_level = 0
         if max_level is None: max_level = pf.h.max_level
         self.min_level = min_level
         self.max_level = max_level
         self.comm_rank = comm_rank
         self.comm_size = comm_size
-        left_edge = np.array([-np.inf]*3)
-        right_edge = np.array([np.inf]*3)
         self.trunk = Node(None, None, None,
-                left_edge, right_edge, None, 1)
-        self.build()
+                left, right, -1, 1)
+        if grids is None:
+            grids = pf.h.region((left+right)/2., left, right)._grids
+        self.grids = grids
+        self.build(self.grids)
 
     def add_grids(self, grids):
-        gles = np.array([g.LeftEdge for g in grids])
-        gres = np.array([g.RightEdge for g in grids])
-        gids = np.array([g.id for g in grids])
-        add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
-        del gles, gres, gids, grids
+        lvl_range = range(self.min_level, self.max_level+1)
+        if grids is None:
+            level_iter = self.pf.hierarchy.get_levels()
+            grids_added = 0
+            while True:
+                try:
+                    grids = level_iter.next()
+                except:
+                    break
+                if grids[0].Level not in lvl_range:
+                    continue
+                if grids_added < self.comm_size:
+                    gmask = np.array([g in self.grids for g in grids])
+                    gles = np.array([g.LeftEdge for g in grids])[gmask]
+                    gres = np.array([g.RightEdge for g in grids])[gmask]
+                    gids = np.array([g.id for g in grids])[gmask]
+                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
+                              self.comm_rank,
+                              self.comm_size)
+                    grids_added += grids.size
+                    del gles, gres, gids, grids
+                else:
+                    grids_added += grids.size
+                    [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
+                              self.comm_rank, self.comm_size) for g in grids]
+            return
 
-    def build(self):
-        lvl_range = range(self.min_level, self.max_level+1)
         for lvl in lvl_range:
-            #grids = self.data_source.select_grids(lvl)
-            grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
-            if len(grids) == 0: continue 
-            self.add_grids(grids)
+            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
+            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
+            gids = np.array([g.id for g in grids if g.Level == lvl])
+
+            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
+            del gles, gres, gids
+
+
+    def build(self, grids=None):
+        self.add_grids(grids)
 
     def check_tree(self):
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
             gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
-            assert(np.all(grid.LeftEdge <= node.left_edge))
-            assert(np.all(grid.RightEdge >= node.right_edge))
+            assert(np.all(grid.LeftEdge <= nle))
+            assert(np.all(grid.RightEdge >= nre))
             assert(np.all(dims > 0))
             # print grid, dims, li, ri
 
@@ -133,79 +139,81 @@
         mylog.debug('AMRKDTree volume = %e' % vol)
         kd_node_check(self.trunk)
 
-    def sum_cells(self):
+    def sum_cells(self, all_cells=False):
         cells = 0
-        for node in depth_traverse(self):
-            if node.grid is None:
+        for node in depth_traverse(self.trunk):
+            if node.grid == -1:
+                continue
+            if not all_cells and not kd_is_leaf(node):
                 continue
             grid = self.pf.h.grids[node.grid - self._id_offset]
             dds = grid.dds
             gle = grid.LeftEdge
-            gre = grid.RightEdge
-            li = np.rint((node.left_edge-gle)/dds).astype('int32')
-            ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+            nle = get_left_edge(node)
+            nre = get_right_edge(node)
+            li = np.rint((nle-gle)/dds).astype('int32')
+            ri = np.rint((nre-gle)/dds).astype('int32')
             dims = (ri - li).astype('int32')
             cells += np.prod(dims)
-
         return cells
 
 class AMRKDTree(ParallelAnalysisInterface):
-    fields = None
-    log_fields = None
-    no_ghost = True
-    def __init__(self, pf, min_level=None, max_level=None, data_source=None):
+    def __init__(self, pf,  l_max=None, le=None, re=None,
+                 fields=None, no_ghost=False, min_level=None, max_level=None,
+                 log_fields=None,
+                 grids=None):
 
         ParallelAnalysisInterface.__init__(self)
 
         self.pf = pf
+        self.l_max = l_max
+        if max_level is None: max_level = l_max
+        if fields is None: fields = ["Density"]
+        self.fields = ensure_list(fields)
         self.current_vcds = []
         self.current_saved_grids = []
         self.bricks = []
         self.brick_dimensions = []
         self.sdx = pf.h.get_smallest_dx()
+
         self._initialized = False
-        try: 
-            self._id_offset = pf.h.grids[0]._id_offset
-        except:
-            self._id_offset = 0
+        self.no_ghost = no_ghost
+        if log_fields is not None:
+            log_fields = ensure_list(log_fields)
+        else:
+            pf.h  # instantiate the hierarchy so field_info is populated
+            log_fields = [self.pf.field_info[field].take_log
+                         for field in self.fields]
 
-        #self.add_mask_field()
-        if data_source is None:
-            data_source = pf.h.all_data()
-        self.data_source = data_source
-    
+        self.log_fields = log_fields
+        self._id_offset = pf.h.grids[0]._id_offset
+
+        if le is None:
+            self.le = pf.domain_left_edge
+        else:
+            self.le = np.array(le)
+        if re is None:
+            self.re = pf.domain_right_edge
+        else:
+            self.re = np.array(re)
+
         mylog.debug('Building AMRKDTree')
         self.tree = Tree(pf, self.comm.rank, self.comm.size,
-                         min_level=min_level,
-                         max_level=max_level, data_source=data_source)
+                         self.le, self.re, min_level=min_level,
+                         max_level=max_level, grids=grids)
 
-    def set_fields(self, fields, log_fields, no_ghost):
-        self.fields = fields
-        self.log_fields = log_fields
-        self.no_ghost = no_ghost
-        del self.bricks, self.brick_dimensions
-        self.brick_dimensions = []
+    def initialize_source(self):
+        if self._initialized : return
         bricks = []
         for b in self.traverse():
             bricks.append(b)
         self.bricks = np.array(bricks)
         self.brick_dimensions = np.array(self.brick_dimensions)
-    
-    def initialize_source(self, fields, log_fields, no_ghost):
-        if fields == self.fields and log_fields == self.log_fields and \
-            no_ghost == self.no_ghost:
-            return
-        self.set_fields(fields, log_fields, no_ghost)
+        self._initialized = True
 
     def traverse(self, viewpoint=None):
-        if viewpoint is None:
-            for node in depth_traverse(self.tree):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
-        else:
-            for node in viewpoint_traverse(self.tree, viewpoint):
-                if kd_is_leaf(node) and node.grid is not None:
-                    yield self.get_brick_data(node)
+        for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
+            yield self.get_brick_data(node)
 
     def get_node(self, nodeid):
         path = np.binary_repr(nodeid)
@@ -226,13 +234,13 @@
         owners = {}
         for bottom_id in range(self.comm.size, 2*self.comm.size):
             temp = self.get_node(bottom_id)
-            owners[temp.id] = temp.id - self.comm.size
+            owners[temp.node_id] = temp.node_id - self.comm.size
             while temp is not None:
                 if temp.parent is None: break
                 if temp == temp.parent.right:
                     break
                 temp = temp.parent
-                owners[temp.id] = owners[temp.left.id]
+                owners[temp.node_id] = owners[temp.left.node_id]
         return owners
 
     def reduce_tree_images(self, image, viewpoint):
@@ -242,44 +250,39 @@
         owners = self.get_reduce_owners()
         node = self.get_node(nprocs + myrank)
 
-        while True:
-            if owners[node.parent.id] == myrank:
-                split = node.parent.split
-                left_in_front = viewpoint[split.dim] < node.parent.split.pos
-                #add_to_front = (left_in_front == (node == node.parent.right))
-                add_to_front = not left_in_front
-                image = receive_and_reduce(self.comm, owners[node.parent.right.id],
-                                  image, add_to_front)
-                if node.parent.id == 1: break
-                else: node = node.parent
-            else:
-                send_to_parent(self.comm, owners[node.parent.id], image)
-                break
-        image = scatter_image(self.comm, owners[1], image)
-        return image
+        while owners[node.parent.node_id] == myrank:
+            split_dim = node.parent.get_split_dim()
+            split_pos = node.parent.get_split_pos()
+            add_to_front = viewpoint[split_dim] >= split_pos
+            image = receive_and_reduce(self.comm,
+                                       owners[node.parent.right.node_id],
+                                       image, add_to_front)
+            if node.parent.node_id == 1: break
+            else: node = node.parent
+        else:
+            send_to_parent(self.comm, owners[node.parent.node_id], image)
+
+        return scatter_image(self.comm, owners[1], image)
 
     def get_brick_data(self, node):
         if node.data is not None: return node.data
         grid = self.pf.h.grids[node.grid - self._id_offset]
         dds = grid.dds
         gle = grid.LeftEdge
-        gre = grid.RightEdge
-        li = np.rint((node.left_edge-gle)/dds).astype('int32')
-        ri = np.rint((node.right_edge-gle)/dds).astype('int32')
+        nle = get_left_edge(node)
+        nre = get_right_edge(node)
+        li = np.rint((nle-gle)/dds).astype('int32')
+        ri = np.rint((nre-gle)/dds).astype('int32')
         dims = (ri - li).astype('int32')
-        assert(np.all(grid.LeftEdge <= node.left_edge))
-        assert(np.all(grid.RightEdge >= node.right_edge))
+        assert(np.all(grid.LeftEdge <= nle))
+        assert(np.all(grid.RightEdge >= nre))
 
         if grid in self.current_saved_grids:
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
-            mask = make_vcd(grid.child_mask)
-            mask = np.clip(mask, 0.0, 1.0)
-            mask[mask<0.5] = np.inf
             for i,field in enumerate(self.fields):
-                vcd = make_vcd(grid[field], log=self.log_fields[i])
-                vcd *= mask
+                vcd = grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
                 if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(grid)
@@ -290,12 +293,11 @@
                   li[2]:ri[2]+1].copy() for d in dds]
 
         brick = PartitionedGrid(grid.id, data,
-                                node.left_edge.copy(),
-                                node.right_edge.copy(),
+                                nle.copy(),
+                                nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
-        if not self._initialized: 
-            self.brick_dimensions.append(dims)
+        if not self._initialized: self.brick_dimensions.append(dims)
         return brick
 
     def locate_brick(self, position):
@@ -352,7 +354,8 @@
 
         if (in_grid != True).sum()>0:
             grids[in_grid != True] = \
-                [self.pf.h.grids[self.locate_brick(new_positions[i]).grid] 
+                [self.pf.h.grids[self.locate_brick(new_positions[i]).grid -
+                                 self._id_offset]
                  for i in get_them]
             cis[in_grid != True] = \
                 [(new_positions[i]-grids[i].LeftEdge)/
@@ -389,7 +392,8 @@
         
         """
         position = np.array(position)
-        grid = self.pf.h.grids[self.locate_brick(position).grid]
+        grid = self.pf.h.grids[self.locate_brick(position).grid -
+                               self._id_offset]
         ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
         return self.locate_neighbors(grid,ci)
 
@@ -402,7 +406,7 @@
             self.comm.recv_array(self.comm.rank-1, tag=self.comm.rank-1)
         f = h5py.File(fn,'w')
         for node in depth_traverse(self.tree):
-            i = node.id
+            i = node.node_id
             if node.data is not None:
                 for fi,field in enumerate(self.fields):
                     try:
@@ -423,8 +427,8 @@
         try:
             f = h5py.File(fn,"a")
             for node in depth_traverse(self.tree):
-                i = node.id
-                if node.grid is not None:
+                i = node.node_id
+                if node.grid != -1:
                     data = [f["brick_%s_%s" %
                               (hex(i), field)][:].astype('float64') for field in self.fields]
                     node.data = PartitionedGrid(node.grid.id, data,
@@ -473,32 +477,28 @@
         gridids = []
         splitdims = []
         splitposs = []
-        for node in depth_first_touch(self.tree):
-            nids.append(node.id) 
-            les.append(node.left_edge) 
-            res.append(node.right_edge) 
+        for node in depth_first_touch(self.tree.trunk):
+            nids.append(node.node_id) 
+            les.append(node.get_left_edge()) 
+            res.append(node.get_right_edge()) 
             if node.left is None:
                 leftids.append(-1) 
             else:
-                leftids.append(node.left.id) 
+                leftids.append(node.left.node_id) 
             if node.right is None:
                 rightids.append(-1) 
             else:
-                rightids.append(node.right.id) 
+                rightids.append(node.right.node_id) 
             if node.parent is None:
                 parentids.append(-1) 
             else:
-                parentids.append(node.parent.id) 
+                parentids.append(node.parent.node_id) 
             if node.grid is None:
                 gridids.append(-1) 
             else:
                 gridids.append(node.grid) 
-            if node.split is None:
-                splitdims.append(-1)
-                splitposs.append(np.nan)
-            else:
-                splitdims.append(node.split.dim)
-                splitposs.append(node.split.pos)
+            splitdims.append(node.get_split_dim())
+            splitposs.append(node.get_split_pos())
 
         return nids, parentids, leftids, rightids, les, res, gridids,\
                 splitdims, splitposs
@@ -515,19 +515,23 @@
         N = nids.shape[0]
         for i in xrange(N):
             n = self.get_node(nids[i])
-            n.left_edge = les[i]
-            n.right_edge = res[i]
+            n.set_left_edge(les[i])
+            n.set_right_edge(res[i])
             if lids[i] != -1 and n.left is None:
-                n.left = Node(n, None, None, None,  
-                                      None, None, lids[i])
+                n.left = Node(n, None, None, 
+                              np.zeros(3, dtype='float64'),  
+                              np.zeros(3, dtype='float64'),  
+                              -1, lids[i])
             if rids[i] != -1 and n.right is None:
-                n.right = Node(n, None, None, None, 
-                                      None, None, rids[i])
+                n.right = Node(n, None, None, 
+                               np.zeros(3, dtype='float64'),  
+                               np.zeros(3, dtype='float64'),  
+                               -1, rids[i])
             if gids[i] != -1:
                 n.grid = gids[i]
 
             if splitdims[i] != -1:
-                n.split = Split(splitdims[i], splitposs[i])
+                n.create_split(splitdims[i], splitposs[i])
 
         mylog.info('AMRKDTree rebuilt, Final Volume: %e' % kd_sum_volume(self.tree.trunk))
         return self.tree.trunk

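The node_id bookkeeping introduced above relies on an implicit heap-style addressing scheme: the trunk is node 1, a left child is 2*id, and a right child is 2*id + 1, so the binary representation of a node_id spells out the left/right turns from the trunk. This is the same scheme that get_node() walks in the next changeset. A minimal standalone sketch of the idea, assuming nothing beyond NumPy (it is not part of the yt API):

    import numpy as np

    def path_from_trunk(node_id):
        # Skip the leading '1' (the trunk itself); each remaining binary
        # digit is one turn: '0' descends left, '1' descends right.
        return ['left' if c == '0' else 'right'
                for c in np.binary_repr(node_id)[1:]]

    print(path_from_trunk(1))  # []                -> the trunk
    print(path_from_trunk(5))  # ['left', 'right'] -> 5 = 2*2 + 1
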
diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1401,7 +1401,7 @@
         tf = ColorTransferFunction((mi-2, ma+2))
         tf.add_layers(n_contours,w=contour_width,col_bounds = (mi,ma), colormap=cmap)
 
-        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf)
+        cam = pf.h.camera(center, L, width, (N,N), transfer_function=tf, fields=[field])
         image = cam.snapshot()
 
         if args.enhance:

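The one-line change above matters because the KD-tree behind the camera fell back to a default field list of ["Density"] when none was supplied (visible in the old AMRKDTree constructor removed in the next changeset), so rendering any other field could sample Density against transfer-function bounds computed from `field`. A hedged sketch of the corrected call; `field`, `mi`, `ma`, `tf`, and friends follow the hunk above, values illustrative:

    # Forwarding fields=[field] keeps the sampled field consistent with
    # the transfer-function bounds (mi, ma) derived from that field.
    cam = pf.h.camera(center, L, width, (N, N),
                      transfer_function=tf, fields=[field])
    image = cam.snapshot()
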
diff -r d397b2e138de6eb61d0a5d1d21faae041558eb1c -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 yt/utilities/grid_data_format/setup.py
--- a/yt/utilities/grid_data_format/setup.py
+++ b/yt/utilities/grid_data_format/setup.py
@@ -9,6 +9,7 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('grid_data_format', parent_package, top_path)
     config.add_subpackage("conversion")
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/a77773aebadb/
Changeset:   a77773aebadb
Branch:      yt-3.0
User:        samskillman
Date:        2013-07-12 17:18:29
Summary:     Fixing up errors in the new Cython AMRKDTree. Rendering of Enzo data is now functional.
Affected #:  2 files

diff -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 -r a77773aebadb654bddb92def2556d997724a4622 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -53,11 +53,15 @@
                   [ 1,  1, -1], [ 1,  1,  0], [ 1,  1,  1] ])
 
 class Tree(object):
-    def __init__(self, pf, comm_rank=0, comm_size=1, left=None, right=None,
-            min_level=None, max_level=None, grids=None):
-        
+    def __init__(self, pf, comm_rank=0, comm_size=1, left=None, right=None, 
+        min_level=None, max_level=None, source=None):
+
         self.pf = pf
         self._id_offset = self.pf.h.grids[0]._id_offset
+
+        if source is None:
+            source = pf.h.all_data()
+        self.source = source
         if left is None:
             left = np.array([-np.inf]*3)
         if right is None:
@@ -69,52 +73,24 @@
         self.max_level = max_level
         self.comm_rank = comm_rank
         self.comm_size = comm_size
-        self.trunk = Node(None, None, None,
-                left, right, -1, 1)
-        if grids is None:
-            grids = pf.h.region((left+right)/2., left, right)._grids
-        self.grids = grids
-        self.build(self.grids)
+        self.trunk = Node(None, None, None, left, right, -1, 1)
+        self.build()
 
     def add_grids(self, grids):
+        gles = np.array([g.LeftEdge for g in grids])
+        gres = np.array([g.RightEdge for g in grids])
+        gids = np.array([g.id for g in grids])
+        add_pygrids(self.trunk, gids.size, gles, gres, gids,
+                    self.comm_rank, self.comm_size)
+        del gles, gres, gids, grids
+
+    def build(self):
         lvl_range = range(self.min_level, self.max_level+1)
-        if grids is None:
-            level_iter = self.pf.hierarchy.get_levels()
-            grids_added = 0
-            while True:
-                try:
-                    grids = level_iter.next()
-                except:
-                    break
-                if grids[0].Level not in lvl_range:
-                    continue
-                if grids_added < self.comm_size:
-                    gmask = np.array([g in self.grids for g in grids])
-                    gles = np.array([g.LeftEdge for g in grids])[gmask]
-                    gres = np.array([g.RightEdge for g in grids])[gmask]
-                    gids = np.array([g.id for g in grids])[gmask]
-                    add_pygrids(self.trunk, gids.size, gles, gres, gids, 
-                              self.comm_rank,
-                              self.comm_size)
-                    grids_added += grids.size
-                    del gles, gres, gids, grids
-                else:
-                    grids_added += grids.size
-                    [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
-                              self.comm_rank, self.comm_size) for g in grids]
-            return
-
         for lvl in lvl_range:
-            gles = np.array([g.LeftEdge for g in grids if g.Level == lvl])
-            gres = np.array([g.RightEdge for g in grids if g.Level == lvl])
-            gids = np.array([g.id for g in grids if g.Level == lvl])
-
-            add_pygrids(self.trunk, len(gids), gles, gres, gids, self.comm_rank, self.comm_size)
-            del gles, gres, gids
-
-
-    def build(self, grids=None):
-        self.add_grids(grids)
+            #grids = self.source.select_grids(lvl)
+            grids = np.array([b for b, mask in self.source.blocks if b.Level == lvl])
+            if len(grids) == 0: continue
+            self.add_grids(grids)
 
     def check_tree(self):
         for node in depth_traverse(self.trunk):
@@ -157,19 +133,19 @@
             cells += np.prod(dims)
         return cells
 
+
 class AMRKDTree(ParallelAnalysisInterface):
-    def __init__(self, pf,  l_max=None, le=None, re=None,
-                 fields=None, no_ghost=False, min_level=None, max_level=None,
-                 log_fields=None,
-                 grids=None):
+
+    fields = None
+    log_fields = None
+    no_ghost = True
+
+    def __init__(self, pf, min_level=None, max_level=None,
+                 source=None):
 
         ParallelAnalysisInterface.__init__(self)
 
         self.pf = pf
-        self.l_max = l_max
-        if max_level is None: max_level = l_max
-        if fields is None: fields = ["Density"]
-        self.fields = ensure_list(fields)
         self.current_vcds = []
         self.current_saved_grids = []
         self.bricks = []
@@ -177,33 +153,26 @@
         self.sdx = pf.h.get_smallest_dx()
 
         self._initialized = False
-        self.no_ghost = no_ghost
-        if log_fields is not None:
-            log_fields = ensure_list(log_fields)
-        else:
-            pf.h
-            log_fields = [self.pf.field_info[field].take_log
-                         for field in self.fields]
+        try:
+            self._id_offset = pf.h.grids[0]._id_offset
+        except AttributeError:
+            self._id_offset = 0
 
-        self.log_fields = log_fields
-        self._id_offset = pf.h.grids[0]._id_offset
-
-        if le is None:
-            self.le = pf.domain_left_edge
-        else:
-            self.le = np.array(le)
-        if re is None:
-            self.re = pf.domain_right_edge
-        else:
-            self.re = np.array(re)
+        if source is None:
+            source = self.pf.h.all_data()
+        self.source = source
 
         mylog.debug('Building AMRKDTree')
         self.tree = Tree(pf, self.comm.rank, self.comm.size,
-                         self.le, self.re, min_level=min_level,
-                         max_level=max_level, grids=grids)
+                         min_level=min_level, max_level=max_level,
+                         source=source)
 
-    def initialize_source(self):
-        if self._initialized : return
+    def set_fields(self, fields, log_fields, no_ghost):
+        self.fields = fields
+        self.log_fields = log_fields
+        self.no_ghost = no_ghost
+        del self.bricks, self.brick_dimensions
+        self.brick_dimensions = []
         bricks = []
         for b in self.traverse():
             bricks.append(b)
@@ -211,6 +180,12 @@
         self.brick_dimensions = np.array(self.brick_dimensions)
         self._initialized = True
 
+    def initialize_source(self, fields, log_fields, no_ghost):
+        if fields == self.fields and log_fields == self.log_fields and \
+                no_ghost == self.no_ghost:
+            return
+        self.set_fields(fields, log_fields, no_ghost)
+
     def traverse(self, viewpoint=None):
         for node in kd_traverse(self.tree.trunk, viewpoint=viewpoint):
             yield self.get_brick_data(node)
@@ -219,7 +194,7 @@
         path = np.binary_repr(nodeid)
         depth = 1
         temp = self.tree.trunk
-        for depth in range(1,len(path)):
+        for depth in range(1, len(path)):
             if path[depth] == '0':
                 temp = temp.left
             else:
@@ -281,8 +256,8 @@
             dds = self.current_vcds[self.current_saved_grids.index(grid)]
         else:
             dds = []
-            for i,field in enumerate(self.fields):
-                vcd = grid.get_vertex_centered_data(field,smoothed=True,no_ghost=self.no_ghost).astype('float64')
+            for i, field in enumerate(self.fields):
+                vcd = grid.get_vertex_centered_data(field, smoothed=True,no_ghost=self.no_ghost).astype('float64')
                 if self.log_fields[i]: vcd = np.log10(vcd)
                 dds.append(vcd)
                 self.current_saved_grids.append(grid)
@@ -297,7 +272,8 @@
                                 nre.copy(),
                                 dims.astype('int64'))
         node.data = brick
-        if not self._initialized: self.brick_dimensions.append(dims)
+        if not self._initialized:
+            self.brick_dimensions.append(dims)
         return brick
 
     def locate_brick(self, position):
@@ -305,12 +281,12 @@
         Alias of AMRKDTree.locate_node, to preserve backwards
         compatibility.
         """
-        return self.locate_node(position) 
+        return self.locate_node(position)
 
     def locate_neighbors(self, grid, ci):
-        r"""Given a grid and cell index, finds the 26 neighbor grids 
+        r"""Given a grid and cell index, finds the 26 neighbor grids
         and cell indices.
-        
+
         Parameters
         ----------
         grid: Grid Object

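Taken together, the amr_kdtree.py changes invert the old construction flow: the tree is now built per level from a data-source object (defaulting to pf.h.all_data(), iterating source.blocks) rather than from an explicit grid list, and fields, log_fields, and no_ghost move from constructor arguments to set_fields()/initialize_source(), which rebuilds the bricks only when the field configuration actually changes. A hedged usage sketch, assuming a dataset loadable as pf (the dataset path is hypothetical):

    from yt.mods import load
    from yt.utilities.amr_kdtree.amr_kdtree import AMRKDTree

    pf = load("Enzo_64/DD0043/data0043")   # hypothetical dataset path
    source = pf.h.all_data()               # the default when source is None

    kd = AMRKDTree(pf, min_level=None, max_level=None, source=source)

    # Fields are attached after construction; a repeat call with the
    # same (fields, log_fields, no_ghost) triple is a no-op.
    kd.initialize_source(["Density"], [True], True)
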
diff -r 4e6c9e9f4eea85c09c81888f51ecd0ecefdb46c6 -r a77773aebadb654bddb92def2556d997724a4622 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -164,7 +164,7 @@
                  log_fields = None,
                  sub_samples = 5, pf = None,
                  min_level=None, max_level=None, no_ghost=True,
-                 data_source=None,
+                 source=None,
                  use_light=False):
         ParallelAnalysisInterface.__init__(self)
         if pf is not None: self.pf = pf
@@ -196,13 +196,13 @@
         if self.no_ghost:
             mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
 
-        if data_source is None:
-            data_source = self.pf.h.all_data()
-        self.data_source = data_source
+        if source is None:
+            source = self.pf.h.all_data()
+        self.source = source
 
         if volume is None:
             volume = AMRKDTree(self.pf, min_level=min_level, 
-                               max_level=max_level, data_source=self.data_source)
+                               max_level=max_level, source=self.source)
         self.volume = volume        
 
     def _setup_box_properties(self, width, center, unit_vectors):

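On the camera side the keyword is renamed from data_source to source, matching the AMRKDTree constructor, so existing callers that pass data_source= will now raise a TypeError. A hedged sketch of a caller updated for the rename; the center, orientation, and sphere are illustrative, and tf is a transfer function built as in the command_line.py hunk earlier:

    # Restrict the volume rendering to a sphere; this keyword used to
    # be data_source=..., and is now source=...
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.25)
    cam = pf.h.camera([0.5, 0.5, 0.5],   # center
                      [1.0, 0.0, 0.0],   # normal vector
                      0.5,               # width
                      (512, 512),        # resolution
                      transfer_function=tf,
                      source=sp)
    image = cam.snapshot()
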
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
