[Yt-svn] yt-commit r1664 - in trunk/yt: . _amr_utils data_objects extensions/lightcone extensions/volume_rendering lagos parallel_tools raven

mturk at wrangler.dreamhost.com
Wed Mar 17 11:33:24 PDT 2010


Author: mturk
Date: Wed Mar 17 11:33:12 2010
New Revision: 1664
URL: http://yt.enzotools.org/changeset/1664

Log:
HUGE backport from mercurial.  All unit tests pass.

 * Updates to the time series code.  It's now just about usable.
 * Added the parallel_tools package, which will hold all NEW parallel support
   code and into which old code will be migrated.
 * Ray casting now works in parallel and with non-equal width/height values for
   the output image.  This necessitated a change in API.
 * Added AMRInclinedBox data object.
 * Heaviside function added to transfer functions.
 * VolumeRendering is now an object; direct_ray_cast is gone.  (See the usage
   sketch after this list.)
 * Added Sam Skillman's image_handling module.
 * Added HomogenizedBrickCollection and DistributedObjectCollection.  These
   were designed to parallelize the brick partitioning, but I believe we need
   a new mechanism for that; currently they function, but do not perform 3D
   domain decomposition.
 * Adding preliminary support for both Chombo and Gadget.
 * Vertex-centering is a tiny bit faster.
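
A minimal usage sketch of the new interface (a sketch only: the field bounds,
camera parameters, and output prefix below are hypothetical, and an Enzo
dataset is assumed to be loaded as pf):

    import numpy as na
    from yt.extensions.volume_rendering import ColorTransferFunction, \
        VolumeRendering

    tf = ColorTransferFunction((-30.0, -22.0))       # assumed log10 Density bounds
    tf.add_layers(6)                                 # evenly spaced gaussians
    tf.add_step(-25.0, -24.0, [1.0, 0.8, 0.2, 0.3])  # the new Heaviside function
    vr = VolumeRendering(na.array([0.3, 0.7, 0.4]),  # normal_vector
                         0.05,                       # width
                         na.array([0.5, 0.5, 0.5]),  # center
                         (512, 256),                 # width != height now works
                         tf, pf=pf)
    vr.ray_cast()
    vr.dump_image("vr_test")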



Added:
   trunk/yt/lagos/ChomboFields.py
   trunk/yt/parallel_tools/
   trunk/yt/parallel_tools/__init__.py
   trunk/yt/parallel_tools/distributed_object_collection.py
Modified:
   trunk/yt/_amr_utils/PointsInVolume.pyx
   trunk/yt/_amr_utils/VolumeIntegrator.pyx
   trunk/yt/data_objects/time_series.py
   trunk/yt/extensions/lightcone/LightCone.py
   trunk/yt/extensions/volume_rendering/TransferFunction.py
   trunk/yt/extensions/volume_rendering/__init__.py
   trunk/yt/extensions/volume_rendering/grid_partitioner.py
   trunk/yt/extensions/volume_rendering/software_sampler.py
   trunk/yt/funcs.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/DataReadingFuncs.py
   trunk/yt/lagos/EnzoFields.py
   trunk/yt/lagos/FieldInfoContainer.py
   trunk/yt/lagos/HelperFunctions.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/ParallelTools.py
   trunk/yt/lagos/UniversalFields.py
   trunk/yt/lagos/__init__.py
   trunk/yt/raven/PlotTypes.py
   trunk/yt/setup.py

Modified: trunk/yt/_amr_utils/PointsInVolume.pyx
==============================================================================
--- trunk/yt/_amr_utils/PointsInVolume.pyx	(original)
+++ trunk/yt/_amr_utils/PointsInVolume.pyx	Wed Mar 17 11:33:12 2010
@@ -28,9 +28,13 @@
 cimport numpy as np
 cimport cython
 
+cdef extern from "math.h":
+    double fabs(double x)
+
 @cython.wraparound(False)
 @cython.boundscheck(False)
-def PointsInVolume(np.ndarray[np.float64_t, ndim=2] points,
+def planar_points_in_volume(
+                   np.ndarray[np.float64_t, ndim=2] points,
                    np.ndarray[np.int8_t, ndim=1] pmask,  # pixel mask
                    np.ndarray[np.float64_t, ndim=1] left_edge,
                    np.ndarray[np.float64_t, ndim=1] right_edge,
@@ -69,5 +73,144 @@
         
     return result
 
-                
-                 
+cdef inline void set_rotated_pos(
+            np.float64_t cp[3], np.float64_t rdds[3][3],
+            np.float64_t rorigin[3], int i, int j, int k):
+    cdef int oi
+    for oi in range(3):
+        cp[oi] = rdds[0][oi] * (0.5 + i) \
+               + rdds[1][oi] * (0.5 + j) \
+               + rdds[2][oi] * (0.5 + k) \
+               + rorigin[oi]
+
+#@cython.wraparound(False)
+#@cython.boundscheck(False)
+def grid_points_in_volume(
+                   np.ndarray[np.float64_t, ndim=1] box_lengths,
+                   np.ndarray[np.float64_t, ndim=1] box_origin,
+                   np.ndarray[np.float64_t, ndim=2] rot_mat,
+                   np.ndarray[np.float64_t, ndim=1] grid_left_edge,
+                   np.ndarray[np.float64_t, ndim=1] grid_right_edge,
+                   np.ndarray[np.float64_t, ndim=1] dds,
+                   np.ndarray[np.int32_t, ndim=3] mask,
+                   int break_first):
+    cdef int n[3], i, j, k, ax
+    cdef np.float64_t rds[3][3], cur_pos[3], rorigin[3]
+    for i in range(3):
+        rorigin[i] = 0.0
+    for i in range(3):
+        n[i] = mask.shape[i]
+        for j in range(3):
+            # Set up our transposed dx, which has a component in every
+            # direction
+            rds[i][j] = dds[i] * rot_mat[j,i]
+            # In our rotated coordinate system, the box origin is 0,0,0
+            # so we subtract the box_origin from the grid_origin and rotate
+            # that
+            rorigin[j] += (grid_left_edge[i] - box_origin[i]) * rot_mat[j,i]
+
+    for i in range(n[0]):
+        for j in range(n[1]):
+            for k in range(n[2]):
+                set_rotated_pos(cur_pos, rds, rorigin, i, j, k)
+                if (cur_pos[0] > box_lengths[0]): continue
+                if (cur_pos[1] > box_lengths[1]): continue
+                if (cur_pos[2] > box_lengths[2]): continue
+                if (cur_pos[0] < 0.0): continue
+                if (cur_pos[1] < 0.0): continue
+                if (cur_pos[2] < 0.0): continue
+                if break_first:
+                    if mask[i,j,k]: return 1
+                else:
+                    mask[i,j,k] = 1
+    return 0
+
+cdef void normalize_vector(np.float64_t vec[3]):
+    cdef int i
+    cdef np.float64_t norm = 0.0
+    for i in range(3):
+        norm += vec[i]*vec[i]
+    norm = norm**0.5
+    for i in range(3):
+        vec[i] /= norm
+
+cdef void get_cross_product(np.float64_t v1[3],
+                            np.float64_t v2[3],
+                            np.float64_t cp[3]):
+    cp[0] = v1[1]*v2[2] - v1[2]*v2[1]
+    cp[1] = v1[2]*v2[0] - v1[0]*v2[2]
+    cp[2] = v1[0]*v2[1] - v1[1]*v2[0]
+    #print cp[0], cp[1], cp[2]
+
+cdef int check_projected_overlap(
+        np.float64_t sep_ax[3], np.float64_t sep_vec[3], int gi,
+        np.float64_t b_vec[3][3], np.float64_t g_vec[3][3]):
+    cdef int g_ax, b_ax
+    cdef np.float64_t tba, tga, ba, ga, sep_dot
+    ba = ga = sep_dot = 0.0
+    for g_ax in range(3):
+        # We need the grid vectors, which we'll precompute here
+        tba = tga = 0.0
+        for b_ax in range(3):
+            tba += b_vec[g_ax][b_ax] * sep_vec[b_ax]
+            tga += g_vec[g_ax][b_ax] * sep_vec[b_ax]
+        ba += fabs(tba)
+        ga += fabs(tga)
+        sep_dot += sep_vec[g_ax] * sep_ax[g_ax]
+    #print sep_vec[0], sep_vec[1], sep_vec[2],
+    #print sep_ax[0], sep_ax[1], sep_ax[2]
+    return (fabs(sep_dot) > ba+ga)
+
+@cython.wraparound(False)
+@cython.boundscheck(False)
+def find_grids_in_inclined_box(
+        np.ndarray[np.float64_t, ndim=2] box_vectors,
+        np.ndarray[np.float64_t, ndim=1] box_center,
+        np.ndarray[np.float64_t, ndim=2] grid_left_edges,
+        np.ndarray[np.float64_t, ndim=2] grid_right_edges):
+
+    # http://www.gamasutra.com/view/feature/3383/simple_intersection_tests_for_games.php?page=5
+    cdef int n = grid_right_edges.shape[0]
+    cdef int g_ax, b_ax, gi
+    cdef np.float64_t b_vec[3][3], g_vec[3][3], a_vec[3][3], sep_ax[15][3]
+    cdef np.float64_t sep_vec[3], norm
+    cdef np.ndarray[np.int32_t, ndim=1] good = np.zeros(n, dtype='int32')
+    cdef np.ndarray[np.float64_t, ndim=2] grid_centers
+    # Fill in our axis unit vectors
+    for b_ax in range(3):
+        for g_ax in range(3):
+            a_vec[b_ax][g_ax] = <np.float64_t> (b_ax == g_ax)
+    grid_centers = (grid_right_edges + grid_left_edges)/2.0
+
+    # Now we pre-compute our candidate separating axes, because the unit
+    # vectors for all the grids are identical
+    for b_ax in range(3):
+        # We have 6 principal axes we already know, which are the grid (domain)
+        # principal axes and the box axes
+        sep_ax[b_ax][0] = sep_ax[b_ax][1] = sep_ax[b_ax][2] = 0.0
+        sep_ax[b_ax][b_ax] = 1.0 # delta_ijk, for grid axes
+        for g_ax in range(3):
+            b_vec[b_ax][g_ax] = 0.5*box_vectors[b_ax,g_ax]
+            sep_ax[b_ax + 3][g_ax] = b_vec[b_ax][g_ax] # box axes
+        normalize_vector(sep_ax[b_ax + 3])
+        for g_ax in range(3):
+            get_cross_product(b_vec[b_ax], a_vec[g_ax], sep_ax[b_ax*3 + g_ax + 6])
+            normalize_vector(sep_ax[b_ax*3 + g_ax + 6])
+
+    for gi in range(n):
+        for g_ax in range(3):
+            # Calculate the separation vector
+            sep_vec[g_ax] = grid_centers[gi, g_ax] - box_center[g_ax]
+            # Calculate the grid axis lengths
+            g_vec[g_ax][0] = g_vec[g_ax][1] = g_vec[g_ax][2] = 0.0
+            g_vec[g_ax][g_ax] = 0.5 * (grid_right_edges[gi, g_ax]
+                                     - grid_left_edges[gi, g_ax])
+        for b_ax in range(15):
+            #print b_ax,
+            if check_projected_overlap(
+                        sep_ax[b_ax], sep_vec, gi,
+                        b_vec,  g_vec):
+                good[gi] = 1
+                break
+    return good
+

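For reference, the separating-axis test that check_projected_overlap and
find_grids_in_inclined_box implement above can be written compactly in NumPy.
This is an illustrative sketch with invented names, not the production code:

    import numpy as na

    def obb_aabb_overlap(box_half_vecs, box_center, grid_half_widths, grid_center):
        # box_half_vecs: (3, 3) half-edge vectors of the inclined box.
        # grid_half_widths: (3,) half-widths of an axis-aligned grid.
        # Two convex boxes are disjoint iff some candidate axis separates
        # their projections; the 15 candidates are the 3 grid axes, the 3
        # box axes, and the 9 pairwise cross products.
        grid_half_vecs = na.diag(grid_half_widths)
        axes = list(na.identity(3)) + list(box_half_vecs)
        axes += [na.cross(b, a) for b in box_half_vecs
                                for a in na.identity(3)]
        sep = grid_center - box_center
        for ax in axes:
            norm = na.sqrt(na.dot(ax, ax))
            if norm == 0.0: continue                 # degenerate cross product
            ax = ax / norm
            r_box = na.abs(na.dot(box_half_vecs, ax)).sum()
            r_grid = na.abs(na.dot(grid_half_vecs, ax)).sum()
            if abs(na.dot(sep, ax)) > r_box + r_grid:
                return False                         # separating axis found
        return True                                  # no axis separates them
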
Modified: trunk/yt/_amr_utils/VolumeIntegrator.pyx
==============================================================================
--- trunk/yt/_amr_utils/VolumeIntegrator.pyx	(original)
+++ trunk/yt/_amr_utils/VolumeIntegrator.pyx	Wed Mar 17 11:33:12 2010
@@ -119,6 +119,9 @@
         bin_id = <int> ((dv - self.x_bounds[0]) * self.idbin)
         # Recall that linear interpolation is y0 + (x-x0) * dx/dy
         dd = dv-(self.x_bounds[0] + bin_id * self.dbin) # x - x0
+        if bin_id > self.nbins - 2 or bin_id < 0:
+            for channel in range(4): trgba[channel] = 0.0
+            return
         for channel in range(4):
             bv = self.vs[channel][bin_id] # This is x0
             dy = self.vs[channel][bin_id+1]-bv # dy
@@ -150,7 +153,7 @@
     cdef public object avp_pos, avp_dir, acenter, aimage
     cdef np.float64_t *vp_pos, *vp_dir, *center, *image,
     cdef np.float64_t pdx, pdy, bounds[4]
-    cdef int nv
+    cdef int nv[2]
     cdef public object ax_vec, ay_vec
     cdef np.float64_t *x_vec, *y_vec
 
@@ -175,10 +178,11 @@
         self.image = <np.float64_t *> image.data
         self.x_vec = <np.float64_t *> x_vec.data
         self.y_vec = <np.float64_t *> y_vec.data
-        self.nv = vp_pos.shape[0]
+        self.nv[0] = vp_pos.shape[0]
+        self.nv[1] = vp_pos.shape[1]
         for i in range(4): self.bounds[i] = bounds[i]
-        self.pdx = (self.bounds[1] - self.bounds[0])/self.nv
-        self.pdy = (self.bounds[3] - self.bounds[2])/self.nv
+        self.pdx = (self.bounds[1] - self.bounds[0])/self.nv[0]
+        self.pdy = (self.bounds[3] - self.bounds[2])/self.nv[1]
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -190,10 +194,10 @@
         for i in range(3):
             cx += self.center[i] * self.x_vec[i]
             cy += self.center[i] * self.y_vec[i]
-        rv[0] = <int> floor((ex[0] - cx - self.bounds[0])/self.pdx)
-        rv[1] = rv[0] + <int> ceil((ex[1] - ex[0])/self.pdx)
-        rv[2] = <int> floor((ex[2] - cy - self.bounds[2])/self.pdy)
-        rv[3] = rv[2] + <int> ceil((ex[3] - ex[2])/self.pdy)
+        rv[0] = lrint((ex[0] - cx - self.bounds[0])/self.pdx)
+        rv[1] = rv[0] + lrint((ex[1] - ex[0])/self.pdx)
+        rv[2] = lrint((ex[2] - cy - self.bounds[2])/self.pdy)
+        rv[3] = rv[2] + lrint((ex[3] - ex[2])/self.pdy)
 
     cdef inline void copy_into(self, np.float64_t *fv, np.float64_t *tv,
                         int i, int j, int nk):
@@ -201,13 +205,13 @@
         # to-vector is flat and 'ni' long
         cdef int k
         for k in range(nk):
-            tv[k] = fv[(((k*self.nv)+j)*self.nv+i)]
+            tv[k] = fv[(((k*self.nv[1])+j)*self.nv[0]+i)]
 
     cdef inline void copy_back(self, np.float64_t *fv, np.float64_t *tv,
                         int i, int j, int nk):
         cdef int k
         for k in range(nk):
-            tv[(((k*self.nv)+j)*self.nv+i)] = fv[k]
+            tv[(((k*self.nv[1])+j)*self.nv[0]+i)] = fv[k]
 
 cdef class PartitionedGrid:
     cdef public object my_data
@@ -218,18 +222,20 @@
     cdef np.float64_t right_edge[3]
     cdef np.float64_t dds[3]
     cdef np.float64_t idds[3]
-    cdef public np.float64_t min_dds
     cdef int dims[3]
+    cdef public int parent_grid_id
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     def __cinit__(self,
+                  int parent_grid_id,
                   np.ndarray[np.float64_t, ndim=3] data,
                   np.ndarray[np.float64_t, ndim=1] left_edge,
                   np.ndarray[np.float64_t, ndim=1] right_edge,
                   np.ndarray[np.int64_t, ndim=1] dims):
         # The data is likely brought in via a slice, so we copy it
         cdef int i, j, k, size
+        self.parent_grid_id = parent_grid_id
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
         for i in range(3):
@@ -252,10 +258,13 @@
         cdef np.float64_t v_pos[3], v_dir[3], rgba[4], extrema[4]
         self.calculate_extent(vp, extrema)
         vp.get_start_stop(extrema, iter)
-        for i in range(4): iter[i] = iclip(iter[i], 0, vp.nv)
+        iter[0] = iclip(iter[0], 0, vp.nv[0])
+        iter[1] = iclip(iter[1], 0, vp.nv[0])
+        iter[2] = iclip(iter[2], 0, vp.nv[1])
+        iter[3] = iclip(iter[3], 0, vp.nv[1])
         hit = 0
-        for vj in range(iter[0], iter[1]):
-            for vi in range(iter[2], iter[3]):
+        for vi in range(iter[0], iter[1]):
+            for vj in range(iter[2], iter[3]):
                 vp.copy_into(vp.vp_pos, v_pos, vi, vj, 3)
                 vp.copy_into(vp.image, rgba, vi, vj, 4)
                 self.integrate_ray(v_pos, vp.vp_dir, rgba, tf)
@@ -415,7 +424,7 @@
             for i in range(3):
                 dp[i] += ds[i]
             dv = trilinear_interpolate(self.dims, ci, dp, self.data)
-            if not ((dv > tf.x_bounds[0]) and (dv < tf.x_bounds[1])):
+            if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
                 continue
             if tf.use_light == 1:
                 eval_gradient(self.dims, ci, dp, self.data, grad)
@@ -459,9 +468,12 @@
     cdef public object LeftEdge
     cdef public object RightEdge
     cdef public object subgrid_faces
-    def __cinit__(self, np.ndarray[np.float64_t, ndim=1] left_edge,
-                       np.ndarray[np.float64_t, ndim=1] right_edge,
-                       subgrid_faces):
+    cdef public int parent_grid_id
+    def __cinit__(self, int parent_grid_id,
+                  np.ndarray[np.float64_t, ndim=1] left_edge,
+                  np.ndarray[np.float64_t, ndim=1] right_edge,
+                  subgrid_faces):
+        self.parent_grid_id = parent_grid_id
         cdef int i
         self.LeftEdge = left_edge
         self.RightEdge = right_edge
@@ -500,11 +512,13 @@
 
         for i in range(3): split_left[i] = self.right_edge[i]
         split_left[direction] = sp[direction]
-        left = ProtoPrism(self.LeftEdge, split_left, self.subgrid_faces)
+        left = ProtoPrism(self.parent_grid_id, self.LeftEdge, split_left,
+                          self.subgrid_faces)
 
         for i in range(3): split_right[i] = self.left_edge[i]
         split_right[direction] = sp[direction]
-        right = ProtoPrism(split_right, self.RightEdge, self.subgrid_faces)
+        right = ProtoPrism(self.parent_grid_id, split_right, self.RightEdge,
+                           self.subgrid_faces)
 
         return (left, right)
 
@@ -528,5 +542,6 @@
             dims[i] = idims[i]
         cdef np.ndarray[np.float64_t, ndim=3] new_data
         new_data = data[li[0]:ri[0]+1,li[1]:ri[1]+1,li[2]:ri[2]+1].copy()
-        PG = PartitionedGrid(new_data, self.LeftEdge, self.RightEdge, dims)
+        PG = PartitionedGrid(self.parent_grid_id, new_data,
+                             self.LeftEdge, self.RightEdge, dims)
         return [PG]

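A quick sanity check of the rectangular-image indexing above: with an
(nx, ny, 4) Fortran-ordered image buffer, channel k of pixel (i, j) lives at
flat offset ((k*nv[1]) + j)*nv[0] + i, which is what copy_into and copy_back
now compute.  A sketch:

    import numpy as na

    nx, ny = 4, 3                  # nv[0], nv[1]
    image = na.arange(nx * ny * 4, dtype='float64').reshape((nx, ny, 4),
                                                            order='F')
    flat = image.ravel(order='A')  # the underlying buffer, in memory order
    i, j, k = 2, 1, 3
    assert flat[((k * ny) + j) * nx + i] == image[i, j, k]
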
Modified: trunk/yt/data_objects/time_series.py
==============================================================================
--- trunk/yt/data_objects/time_series.py	(original)
+++ trunk/yt/data_objects/time_series.py	Wed Mar 17 11:33:12 2010
@@ -9,25 +9,33 @@
         # We can make this fancier, but this works
         return self.outputs.__iter__()
 
-_enzo_header = "DATASET WRITTEN "
-
 class EnzoTimeSeries(TimeSeriesData):
-    def __init__(self, name, output_log = "OutputLog"):
+    _enzo_header = "DATASET WRITTEN "
+    def __init__(self, name, **kwargs):
         TimeSeriesData.__init__(self, name)
+        output_list = kwargs.pop('output_list', None)
+        output_log = kwargs.pop('output_log', None)
+        if output_list: self._populate_output_list(output_list)
+        if output_log: self._populate_output_log(output_log)
+        for type_name in data_object_registry:
+            setattr(self, type_name, functools.partial(
+                TimeSeriesDataObject, self, type_name))
+
+    def _populate_output_list(self, output_list):
+        for output in output_list:
+            self._insert(EnzoStaticOutput(output))
+
+    def _populate_output_log(self, output_log):
-        for line in open(output_log):
-            if not line.startswith(_enzo_header): continue
-            fn = line[len(_enzo_header):].strip()
-            self._insert(EnzoStaticOutput(fn))
+        for line in open(output_log):
+            if not line.startswith(self._enzo_header): continue
+            fn = line[len(self._enzo_header):].strip()
+            self._insert(EnzoStaticOutput(fn))
 
-        for type_name in data_object_registry:
-            setattr(self, type_name, functools.partial(
-                TimeSeriesDataObject, self, type_name))
-
     def __getitem__(self, key):
-        return self.outputs[key]
         if isinstance(key, types.SliceType):
             if isinstance(key.start, types.FloatType):
-                self.get_range(key.start, key.stop)
+                return self.get_range(key.start, key.stop)
+        return self.outputs[key]
         
     def _insert(self, pf):
         # We get handed an instantiated parameter file

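A short sketch of driving the reworked time-series interface (the OutputLog
path and the slice endpoints here are hypothetical):

    from yt.data_objects.time_series import EnzoTimeSeries

    ts = EnzoTimeSeries("my_run", output_log="OutputLog")
    first_pf = ts[0]      # integer indexing returns a single output
    early = ts[0.0:1.0]   # a float slice dispatches to get_range()
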
Modified: trunk/yt/extensions/lightcone/LightCone.py
==============================================================================
--- trunk/yt/extensions/lightcone/LightCone.py	(original)
+++ trunk/yt/extensions/lightcone/LightCone.py	Wed Mar 17 11:33:12 2010
@@ -38,7 +38,7 @@
     def __init__(self, EnzoParameterFile, initial_redshift=1.0, final_redshift=0.0, observer_redshift=0.0,
                  field_of_view_in_arcminutes=600.0, image_resolution_in_arcseconds=60.0, 
                  use_minimum_datasets=True, deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
-                 output_dir='LC', output_prefix='LightCone'):
+                 output_dir='LC', output_prefix='LightCone', **kwargs):
         """
         Initialize a LightCone object.
         :param initial_redshift (float): the initial (highest) redshift for the light cone.  Default: 1.0.
@@ -88,7 +88,7 @@
         # Initialize EnzoSimulation machinery for getting dataset list.
         EnzoSimulation.__init__(self, EnzoParameterFile, initial_redshift=self.initial_redshift,
                                 final_redshift=self.final_redshift, links=True,
-                                enzo_parameters={'CosmologyComovingBoxSize':float})
+                                enzo_parameters={'CosmologyComovingBoxSize':float}, **kwargs)
 
         # Calculate number of pixels.
         self.pixels = int(self.field_of_view_in_arcminutes * 60.0 / \

Modified: trunk/yt/extensions/volume_rendering/TransferFunction.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/TransferFunction.py	(original)
+++ trunk/yt/extensions/volume_rendering/TransferFunction.py	Wed Mar 17 11:33:12 2010
@@ -46,6 +46,11 @@
             slope * (self.x - x0) + y0
         self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
 
+    def add_step(self, start, stop, value):
+        vals = na.zeros(self.x.shape, 'float64')
+        vals[(self.x >= start) & (self.x <= stop)] = value
+        self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
+
     def plot(self, filename):
         import matplotlib;matplotlib.use("Agg");import pylab
         pylab.clf()
@@ -71,19 +76,36 @@
         for tf, v in zip(self.funcs, height):
             tf.add_gaussian(location, width, v)
 
+    def add_step(self, start, stop, height):
+        for tf, v in zip(self.funcs, height):
+            tf.add_step(start, stop, v)
+
     def plot(self, filename):
-        import matplotlib;matplotlib.use("Agg");import pylab
-        pylab.clf()
-        for c,tf in zip(['r','g','b'], self.funcs):
-            pylab.plot(tf.x, tf.y, '-' + c)
-            pylab.fill(tf.x, tf.y, c, alpha=0.2)
-        pylab.plot(self.alpha.x, self.alpha.y, '-k')
-        pylab.fill(self.alpha.x, self.alpha.y, 'k', alpha=0.1)
-        pylab.xlim(*self.x_bounds)
-        pylab.ylim(0.0, 1.0)
-        pylab.xlabel("Value")
-        pylab.ylabel("Transmission")
-        pylab.savefig(filename)
+        from matplotlib import pyplot
+        from matplotlib.ticker import FuncFormatter
+        pyplot.clf()
+        ax = pyplot.axes()
+        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        ax.imshow(i_data, origin='lower')
+        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.set_xlim(0, self.alpha.x.size)
+        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
+        ax.xaxis.set_ticks(xticks)
+        def x_format(x, pos):
+            return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
+        ax.xaxis.set_major_formatter(FuncFormatter(x_format))
+        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        ax.yaxis.set_ticks(yticks)
+        def y_format(y, pos):
+            return "%0.2f" % (y / self.alpha.y.size)
+        ax.yaxis.set_major_formatter(FuncFormatter(y_format))
+        ax.set_ylabel("Transmission")
+        ax.set_xlabel("Value")
+        pyplot.savefig(filename)
 
     def sample_colormap(self, v, w, alpha=None, colormap="gist_stern"):
         rel = (v - self.x_bounds[0])/(self.x_bounds[1] - self.x_bounds[0])
@@ -94,12 +116,14 @@
         print "Adding gaussian at %s with width %s and colors %s" % (
                 v, w, (r,g,b,alpha))
 
-    def add_layers(self, N, w=None, mi=None, ma=None, colormap="gist_stern"):
+    def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
+                   colormap="gist_stern"):
         dist = (self.x_bounds[1] - self.x_bounds[0])
         if mi is None: mi = self.x_bounds[0] + dist/(10.0*N)
         if ma is None: ma = self.x_bounds[1] - dist/(10.0*N)
         if w is None: w = 0.001 * (ma-mi)/N
-        for v, a in zip(na.mgrid[mi:ma:N*1j], na.logspace(-2.0, 0.0,N)):
+        if alpha is None: alpha = na.logspace(-2.0, 0.0, N)
+        for v, a in zip(na.mgrid[mi:ma:N*1j], alpha):
             self.sample_colormap(v, w, a, colormap=colormap)
 
 if __name__ == "__main__":

Modified: trunk/yt/extensions/volume_rendering/__init__.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/__init__.py	(original)
+++ trunk/yt/extensions/volume_rendering/__init__.py	Wed Mar 17 11:33:12 2010
@@ -28,7 +28,9 @@
 from TransferFunction import TransferFunction, ColorTransferFunction
 from yt.amr_utils import PartitionedGrid, VectorPlane, \
                              TransferFunctionProxy
-from grid_partitioner import partition_all_grids, partition_grid, \
+from grid_partitioner import HomogenizedBrickCollection, \
                              export_partitioned_grids, \
                              import_partitioned_grids
-from software_sampler import direct_ray_cast
+from software_sampler import VolumeRendering
+from image_handling import export_rgba, import_rgba, \
+                           plot_channel, plot_rgb

Modified: trunk/yt/extensions/volume_rendering/grid_partitioner.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/grid_partitioner.py	(original)
+++ trunk/yt/extensions/volume_rendering/grid_partitioner.py	Wed Mar 17 11:33:12 2010
@@ -27,7 +27,166 @@
 from yt.funcs import *
 import h5py
 
-from yt.amr_utils import PartitionedGrid, ProtoPrism, GridFace
+from yt.amr_utils import PartitionedGrid, ProtoPrism, GridFace, \
+        grid_points_in_volume, find_grids_in_inclined_box
+from yt.lagos import ParallelAnalysisInterface, only_on_root, parallel_root_only
+from yt.parallel_tools import DistributedObjectCollection
+
+class HomogenizedBrickCollection(DistributedObjectCollection):
+    def __init__(self, source):
+        # The idea here is that we have two sources -- the global_domain
+        # source, which would be a decomposition of the 3D domain, and a
+        # local_domain source, which is the set of bricks we want at the end.
+        self.source = source
+        self.pf = source.pf
+
+    @classmethod
+    def load_bricks(cls, base_filename):
+        pass
+
+    def write_my_bricks(self, base_filename):
+        pass
+
+    def store_bricks(self, base_filename):
+        pass
+    
+    @parallel_root_only
+    def write_hierarchy(self, base_filename):
+        pass
+    
+    def _partition_grid(self, grid, field, log_field = True):
+        vcd = grid.get_vertex_centered_data(field).astype('float64')
+        if log_field: vcd = na.log10(vcd)
+
+        GF = GridFaces(grid.Children + [grid])
+        PP = ProtoPrism(grid.id, grid.LeftEdge, grid.RightEdge, GF)
+
+        pgs = []
+        for P in PP.sweep(0):
+            pgs += P.get_brick(grid.LeftEdge, grid.dds, vcd, grid.child_mask)
+        return pgs
+
+    def _partition_local_grids(self, fields = "Density", log_field = True):
+        fields = ensure_list(fields)
+        bricks = []
+        # We preload.
+        # UNCOMMENT FOR PARALLELISM
+        #grid_list = list(self._get_grid_objs())
+        grid_list = list(self.source._grids)
+        self._preload(grid_list, fields, self.pf.h.io)
+        pbar = get_pbar("Partitioning ", len(grid_list))
+        # UNCOMMENT FOR PARALLELISM
+        #for i, g in enumerate(self._get_grids()):
+        print "THIS MANY GRIDS!", len(grid_list)
+        for i, g in enumerate(self.source._grids):
+            pbar.update(i)
+            bricks += self._partition_grid(g, fields[0], log_field)
+        pbar.finish()
+        bricks = na.array(bricks, dtype='object')
+        NB = len(bricks)
+        # Now we set up our (local for now) hierarchy.  Note that to calculate
+        # intersection, we only need to do the left edge & right edge.
+        #
+        # We're going to double up a little bit here in memory.
+        self.brick_left_edges = na.zeros( (NB, 3), dtype='float64')
+        self.brick_right_edges = na.zeros( (NB, 3), dtype='float64')
+        self.brick_parents = na.zeros( NB, dtype='int64')
+        self.brick_dimensions = na.zeros( (NB, 3), dtype='int64')
+        self.brick_owners = na.ones(NB, dtype='int32') * self._mpi_get_rank()
+        self._object_owners = self.brick_owners
+        for i,b in enumerate(bricks):
+            self.brick_left_edges[i,:] = b.LeftEdge
+            self.brick_right_edges[i,:] = b.RightEdge
+            self.brick_parents[i] = b.parent_grid_id
+            self.brick_dimensions[i,:] = b.my_data.shape
+        # Vertex-centered means we subtract one from the shape
+        self.brick_dimensions -= 1
+        self.bricks = na.array(bricks, dtype='object')
+        # UNCOMMENT FOR PARALLELISM
+        #self.join_lists()
+
+    def _get_object_info(self):
+        # We transpose here for the catdict operation
+        info_dict = dict(left_edges = self.brick_left_edges.transpose(),
+                         right_edges = self.brick_right_edges.transpose(),
+                         parents = self.brick_parents,
+                         owners = self.brick_owners,
+                         dimensions = self.brick_dimensions.transpose(),)
+        return info_dict
+
+    def _set_object_info(self, info_dict):
+        self.brick_left_edges = info_dict.pop("left_edges").transpose()
+        self.brick_right_edges = info_dict.pop("right_edges").transpose()
+        self.brick_parents = info_dict.pop("parents")
+        self.brick_dimensions = info_dict.pop("dimensions").transpose()
+        self.brick_owners = info_dict.pop("owners")
+        self._object_owners = self.brick_owners
+        bricks = self.bricks
+        self.bricks = na.array([None] * self.brick_owners.size, dtype='object')
+        # Copy our bricks back in
+        self.bricks[self.brick_owners == self._mpi_get_rank()] = bricks[:]
+
+    def _create_buffer(self, ind_list):
+        # Note that we have vertex-centered data, so we add one before taking
+        # the prod and the sum
+        total_size = (self.brick_dimensions[ind_list,:] + 1).prod(axis=1).sum()
+        mylog.debug("Creating buffer for %s bricks (%s)",
+                    len(ind_list), total_size)
+        my_buffer = na.zeros(total_size, dtype='float64')
+        return my_buffer
+
+    def _pack_buffer(self, ind_list, my_buffer):
+        si = 0
+        for index in ind_list:
+            d = self.bricks[index].my_data.ravel()
+            my_buffer[si:si+d.size] = d[:]
+            si += d.size
+
+    def _unpack_buffer(self, ind_list, my_buffer):
+        si = 0
+        for index in ind_list:
+            pgi = self.brick_parents[index]
+            LE = self.brick_left_edges[index,:].copy()
+            RE = self.brick_right_edges[index,:].copy()
+            dims = self.brick_dimensions[index,:].copy()
+            size = (dims + 1).prod()
+            data = my_buffer[si:si+size].reshape(dims + 1)
+            self.bricks[index] = PartitionedGrid(
+                    pgi, data, LE, RE, dims)
+            si += size
+
+    def _wipe_objects(self, indices):
+        self.bricks[indices] = None
+
+    def _collect_bricks(self, intersection_source):
+        if not self._distributed: return
+        # This entire routine should instead be set up to do:
+        #   alltoall broadcast of the *number* of requested bricks
+        #   non-blocking receives posted for int arrays
+        #   sizes of data calculated
+        #   concatenated data receives posted
+        #   send all data
+        #   get bricks back
+        # This presupposes that we are using the AMRInclinedBox as a data
+        # source.  If we're not, we ought to be.
+        needed_brick_i = find_grids_in_inclined_box(
+            intersection_source.box_vectors, intersection_source.center,
+            self.brick_left_edges, self.brick_right_edges)
+        needed_brick_i = na.where(needed_brick_i)[0]
+        self._collect_objects(needed_brick_i)
+
+    def _initialize_parallel(self):
+        pass
+
+    def _finalize_parallel(self):
+        pass
+
+    def get_brick(self, brick_id):
+        pass
+
+    @property
+    def _grids(self):
+        return self.source._grids
 
 class GridFaces(object):
     def __init__(self, grids):
@@ -42,92 +201,12 @@
     def __getitem__(self, item):
         return self.faces[item]
 
-def partition_grid(start_grid, field, log_field = True, threshold = None):
-    if threshold is not None:
-        if start_grid[field].max() < threshold[0] or \
-           start_grid[field].min() > threshold[1]: return None
-    to_cut_up = start_grid.get_vertex_centered_data(field, smoothed=True).astype('float64')
-
-    if log_field: to_cut_up = na.log10(to_cut_up)
-
-    GF = GridFaces(start_grid.Children + [start_grid])
-    PP = ProtoPrism(start_grid.LeftEdge, start_grid.RightEdge, GF)
-    pgs = []
-    for P in PP.sweep(0):
-        pgs += P.get_brick(start_grid.LeftEdge, start_grid.dds, to_cut_up, start_grid.child_mask)
-    return pgs
-
-    if len(start_grid.Children) == 0:
-        pg = PartitionedGrid(
-                to_cut_up.copy(),
-                na.array(start_grid.LeftEdge, dtype='float64'),
-                na.array(start_grid.RightEdge, dtype='float64'),
-                na.array(start_grid.ActiveDimensions, dtype='int64'))
-        return [pg]
-
-    x_vert = [0, start_grid.ActiveDimensions[0]]
-    y_vert = [0, start_grid.ActiveDimensions[1]]
-    z_vert = [0, start_grid.ActiveDimensions[2]]
-
-    gi = start_grid.get_global_startindex()
-    for grid in start_grid.Children:
-        si = grid.get_global_startindex()/2 - gi
-        ei = si + grid.ActiveDimensions/2 
-        x_vert += [si[0], ei[0]]
-        y_vert += [si[1], ei[1]]
-        z_vert += [si[2], ei[2]]
-
-    # Now we sort by our vertices, in axis order
-
-    x_vert.sort()
-    y_vert.sort()
-    z_vert.sort()
-
-    return [g for g in _partition(start_grid, to_cut_up, x_vert, y_vert, z_vert)]
-
-def _partition(grid, grid_data, x_vert, y_vert, z_vert):
-    grids = []
-    cim = grid.child_index_mask
-    for xs, xe in zip(x_vert[:-1], x_vert[1:]):
-        for ys, ye in zip(y_vert[:-1], y_vert[1:]):
-            for zs, ze in zip(z_vert[:-1], z_vert[1:]):
-                sl = (slice(xs, xe), slice(ys, ye), slice(zs, ze))
-                dd = cim[sl]
-                if dd.size == 0: continue
-                uniq = na.unique(dd)
-                if uniq.size > 1: continue
-                if uniq[0] > -1: continue
-                data = grid_data[xs:xe+1,ys:ye+1,zs:ze+1].copy()
-                dims = na.array(dd.shape, dtype='int64')
-                start_index = na.array([xs,ys,zs], dtype='int64')
-                left_edge = grid.LeftEdge + start_index * grid.dds
-                right_edge = left_edge + dims * grid.dds
-                yield PartitionedGrid(
-                    data, left_edge, right_edge, dims)
-
-def partition_all_grids(grid_list, field = "Density", log_field = True,
-                        threshold = (-1e300, 1e300), eval_func = None):
-    new_grids = []
-    pbar = get_pbar("Partitioning ", len(grid_list))
-    if eval_func is None: eval_func = lambda a: True
-    dx = 1e300
-    for i, g in enumerate(grid_list):
-        if not eval_func(g): continue
-        pbar.update(i)
-        if g.dds[0] < dx: dx = g.dds[0]
-        to_add = partition_grid(g, field, log_field, threshold)
-        if to_add is not None: new_grids += to_add
-    pbar.finish()
-    for g in new_grids: g.min_dds = dx
-    return na.array(new_grids, dtype='object')
-
 def export_partitioned_grids(grid_list, fn, int_type=na.int64, float_type=na.float64):
     f = h5py.File(fn, "w")
     pbar = get_pbar("Writing Grids", len(grid_list))
     nelem = sum((grid.my_data.size for grid in grid_list))
     ngrids = len(grid_list)
     group = f.create_group("/PGrids")
-    group.attrs["min_dds"] = grid_list[0].min_dds
     left_edge = na.concatenate([[grid.LeftEdge,] for grid in grid_list])
     f.create_dataset("/PGrids/LeftEdges", data=left_edge, dtype=float_type); del left_edge
     right_edge = na.concatenate([[grid.RightEdge,] for grid in grid_list])
@@ -149,120 +228,14 @@
     data = f["/PGrids/Data"][:].astype(float_type)
     pbar = get_pbar("Reading Grids", dims.shape[0])
     curpos = 0
-    dx = f["/PGrids"].attrs["min_dds"]
     for i in xrange(dims.shape[0]):
         gd = dims[i,:]
         gle, gre = left_edges[i,:], right_edges[i,:]
         gdata = data[curpos:curpos+gd.prod()].reshape(gd)
         # Vertex -> Grid, so we -1 from dims in this
-        grid_list.append(PartitionedGrid(gdata, gle, gre, gd - 1))
-        grid_list[-1].min_dds = dx
+        grid_list.append(PartitionedGrid(-1, gdata, gle, gre, gd - 1))
         curpos += gd.prod()
         pbar.update(i)
     pbar.finish()
     f.close()
     return na.array(grid_list, dtype='object')
-
-class PartitionRegion(object):
-    _count = 0
-    def __init__(self, dims, source_offset, source_vertices, cim_base):
-        self.source_offset = source_offset
-        self.dims = dims
-        cv = []
-        self._cim = cim_base
-        self.child_vertices = source_vertices
-
-    @property
-    def cim(self):
-        return self._cim[self.sl]
-
-    @property
-    def sl(self):
-        sls = self.source_offset
-        sle = self.source_offset + self.dims
-        return tuple([slice(sls[i], sle[i]) for i in range(3)])
-
-    def split(self, axis, coord):
-        dims_left = self.dims.copy()
-        dims_left[axis] = coord - self.source_offset[axis]
-        off_left = self.source_offset.copy()
-        left_region = PartitionRegion(dims_left, off_left,
-                        self.child_vertices, self._cim)
-        dims_right = self.dims.copy()
-        dims_right[axis] = self.dims[axis] - coord + self.source_offset[axis]
-        off_right = self.source_offset.copy()
-        off_right[axis] = coord
-        right_region = PartitionRegion(dims_right, off_right,
-                        self.child_vertices, self._cim)
-        return left_region, right_region
-        
-    def find_hyperplane(self, axis):
-        # Our axis is the normal to the hyperplane
-        # Region boundaries is [2][3]
-        # child_vertices is flat 3D array
-        min_balance = 1e30
-        considered = set([self.source_offset[axis]])
-        considered.add(self.source_offset[axis] + self.dims[axis])
-        best_coord = self.source_offset[axis] + self.dims[axis]
-        for v in self.child_vertices:
-            coord = v[axis]
-            sc = coord - self.source_offset[axis]
-            if coord in considered: continue
-            if sc >= self.dims[axis]: continue
-            if sc < 0: continue
-            eff = self.evaluate_hyperplane(axis, coord)
-            if eff < min_balance:
-                min_balance = eff
-                best_coord = coord
-            considered.add(coord)
-        return best_coord
-
-    def evaluate_hyperplane(self, axis, coord):
-        # We check that we're roughly evenly balanced on either side of the grid
-        # Calculate how well balanced it is...
-        vert = self.child_vertices[:,axis]
-        n_left = (vert <= coord).sum()
-        n_right = (vert > coord).sum()
-        eff = abs(0.5 - (n_left / float(vert.shape[0])))
-        return eff
-
-def partition_region(region, axis=0):
-    # region_boundaries is in ints
-    split_coord = region.find_hyperplane(axis)
-    sc = split_coord - region.source_offset[axis]
-    if sc == 0 or sc == region.dims[axis]:
-        rc = na.unique(region.cim)
-        if rc.size > 1 and rc[0] == -1:
-            region._count += 1
-            if region._count > 3:
-                import pdb;pdb.set_trace()
-            return partition_region(region, (axis+1)%3)
-        elif rc.size > 1 and rc[0] > -1:
-            return []
-    left_region, right_region = region.split(axis, split_coord)
-    lrc = na.unique(left_region.cim)
-    rrc = na.unique(right_region.cim)
-    if lrc.size > 1:
-        if lrc[0] == -1:
-            left_region = partition_region(left_region, (axis + 1) % 3)
-        if lrc[0] > -1:
-            left_region = []
-        #print axis, split_coord, "Not splitting left region", lrc
-    else:
-        if lrc[0] == -1:
-            left_region = [left_region]
-        else:
-            left_region = []
-
-    if rrc.size > 1:
-        if rrc[0] == -1:
-            right_region = partition_region(right_region, (axis + 1) % 3)
-        if rrc[0] > -1:
-            right_region = []
-    else:
-        if rrc[0] == -1:
-            right_region = [right_region]
-        else:
-            right_region = []
-
-    return left_region + right_region

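Note that export_partitioned_grids no longer stores min_dds, and imported
bricks are created with a parent_grid_id of -1.  A round-trip sketch, where
bricks is assumed to be an object array of PartitionedGrids (e.g. the .bricks
attribute of a HomogenizedBrickCollection after _partition_local_grids):

    from yt.extensions.volume_rendering import export_partitioned_grids, \
        import_partitioned_grids

    export_partitioned_grids(bricks, "bricks.h5")
    bricks2 = import_partitioned_grids("bricks.h5")
    assert bricks2[0].parent_grid_id == -1  # parentage is not stored on disk
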
Modified: trunk/yt/extensions/volume_rendering/software_sampler.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/software_sampler.py	(original)
+++ trunk/yt/extensions/volume_rendering/software_sampler.py	Wed Mar 17 11:33:12 2010
@@ -24,79 +24,149 @@
 """
 
 import numpy as na
+import h5py
 from yt.extensions.volume_rendering import *
 from yt.funcs import *
+from yt.lagos import data_object_registry, ParallelAnalysisInterface
 
-def direct_ray_cast(pf, L, center, W, Nvec, tf, 
-                    partitioned_grids = None, field = 'Density',
-                    log_field = True, whole_box=False,
-                    nsamples = 5, x_vec = None, y_vec = None):
-    center = na.array(center, dtype='float64')
-
-    # This just helps us keep track of stuff, and it's cheap
-    cp = pf.h.cutting(L, center)
-    if x_vec is None: x_vec = cp._x_vec
-    if y_vec is None: y_vec = cp._y_vec
-    back_center = center - cp._norm_vec * na.sqrt(3) * W
-    front_center = center + cp._norm_vec * na.sqrt(3) *  W
-    if whole_box:
-        cylinder = pf.h.region([0.5]*3,[0.0]*3,[1.0]*3)
-    else:
-        cylinder = pf.h.disk(center, L, na.sqrt(3)*W, 2*W*na.sqrt(3))
-
-    if partitioned_grids == None:
-        partitioned_grids = partition_all_grids(cylinder._grids,
-                                    eval_func = lambda g: na.any(cylinder._get_point_indices(g)),
-                                                field = field, log_field = log_field)
-    #partitioned_grids = partition_all_grids(pf.h.grids)
-
-    LE = (na.array([grid.LeftEdge for grid in partitioned_grids]) - back_center) * cp._norm_vec
-    RE = (na.array([grid.RightEdge for grid in partitioned_grids]) - back_center) * cp._norm_vec
-    DL = na.sum(LE, axis=1); del LE
-    DR = na.sum(RE, axis=1); del RE
-    dist = na.minimum(DL, DR)
-    ind = na.argsort(dist)
-    
-    image = na.zeros((Nvec,Nvec,4), dtype='float64', order='F')
-    image[:,:,3] = 0.0
-
-    # Now we need to generate regular x,y,z values in regular space for our vector
-    # starting places.
-    px, py = na.mgrid[-W:W:Nvec*1j,-W:W:Nvec*1j]
-    xv = cp._inv_mat[0,0]*px + cp._inv_mat[0,1]*py + back_center[0]
-    yv = cp._inv_mat[1,0]*px + cp._inv_mat[1,1]*py + back_center[1]
-    zv = cp._inv_mat[2,0]*px + cp._inv_mat[2,1]*py + back_center[2]
-    vectors = na.array([xv, yv, zv], dtype='float64').transpose()
-    vectors = vectors.copy('F')
-    xp0, xp1 = px.min(), px.max()
-    yp0, yp1 = py.min(), py.max()
-
-    ng = partitioned_grids.size
-    norm_vec = cp._norm_vec
-    norm_vec = cp._norm_vec * (2.0*W*na.sqrt(3))
-    hit = 0
-    tnow = time.time()
-
-    vp = VectorPlane(vectors, norm_vec, back_center,
-                     (xp0, xp1, yp0, yp1), image, x_vec, y_vec)
-
-    tf.light_dir = cp._norm_vec + 0.5 * x_vec + 0.5 * y_vec
-    cx, cy, cz = 0.3, -0.3, 0.3
-    tf.light_dir = (cp._inv_mat[0,0]*cx + cp._inv_mat[0,1]*cy + cz,
-                    cp._inv_mat[1,0]*cx + cp._inv_mat[1,1]*cy + cz,
-                    cp._inv_mat[2,0]*cx + cp._inv_mat[2,1]*cy + cz)
-    
-    tfp = TransferFunctionProxy(tf)
-    tfp.ns = nsamples
-
-
-    total_cells = sum(na.prod(g.my_data.shape) for g in partitioned_grids)
-    pbar = get_pbar("Ray casting ", total_cells)
-    total_cells = 0
-    for i,g in enumerate(partitioned_grids[ind]):
-        pbar.update(total_cells)
-        pos = g.cast_plane(tfp, vp)
-        total_cells += na.prod(g.my_data.shape)
-    pbar.finish()
+# We're going to register this class, but it does not directly inherit from
+# AMRData.
+class VolumeRendering(ParallelAnalysisInterface):
+    bricks = None
+    def __init__(self, normal_vector, width, center,
+                 resolution, transfer_function,
+                 fields = None, whole_box = False,
+                 sub_samples = 5, north_vector = None,
+                 pf = None):
+        # Now we replicate some of the 'cutting plane' logic
+        if not iterable(resolution):
+            resolution = (resolution, resolution)
+        self.resolution = resolution
+        self.sub_samples = sub_samples
+        if not iterable(width):
+            width = (width, width, width) # front/back, left/right, top/bottom
+        self.width = width
+        self.center = center
+        if fields is None: fields = ["Density"]
+        self.fields = fields
+        self.transfer_function = transfer_function
+        self.pf = pf
+
+        # Now we set up our various vectors
+        normal_vector /= na.sqrt( na.dot(normal_vector, normal_vector))
+        if north_vector is None:
+            vecs = na.identity(3)
+            t = na.cross(normal_vector, vecs).sum(axis=1)
+            ax = t.argmax()
+            north_vector = na.cross(vecs[ax,:], normal_vector).ravel()
+        north_vector /= na.sqrt(na.dot(north_vector, north_vector))
+        east_vector = -na.cross(north_vector, normal_vector).ravel()
+        east_vector /= na.sqrt(na.dot(east_vector, east_vector))
+        self.unit_vectors = [north_vector, east_vector, normal_vector]
+        self.box_vectors = na.array([self.unit_vectors[0]*self.width[0],
+                                     self.unit_vectors[1]*self.width[1],
+                                     self.unit_vectors[2]*self.width[2]])
+
+        self.origin = center - 0.5*width[0]*self.unit_vectors[0] \
+                             - 0.5*width[1]*self.unit_vectors[1] \
+                             - 0.5*width[2]*self.unit_vectors[2]
+        self.back_center = center - 0.5*width[0]*self.unit_vectors[2]
+        self.front_center = center + 0.5*width[0]*self.unit_vectors[2]
+
+        self._initialize_source()
+        self._construct_vector_array()
+
+    def _initialize_source(self):
+        check, source, rf = self._partition_hierarchy_2d_inclined(
+                self.unit_vectors, self.origin, self.width, self.box_vectors)
+        if check:
+            self._base_source = self.pf.h.inclined_box(
+                self.origin, self.box_vectors)
+        else:
+            # To avoid doubling-up
+            self._base_source = source
+        self.source = source
+        self.res_fac = rf
+        # Note that if we want to do this in parallel, with 3D domain decomp
+        # for the grid/bricks, we can supply self._base_source here.  But,
+        # _distributed can't be overridden in that case.
+        self._brick_collection = HomogenizedBrickCollection(self.source)
+
+    def ray_cast(self, finalize=True):
+        if self.bricks is None: self.partition_grids()
+        # Now we order our bricks
+        total_cells, LE, RE = 0, [], []
+        for b in self.bricks:
+            LE.append(b.LeftEdge)
+            RE.append(b.RightEdge)
+            total_cells += na.prod(b.my_data.shape)
+        LE = na.array(LE) - self.back_center
+        RE = na.array(RE) - self.back_center
+        LE = na.sum(LE * self.unit_vectors[2], axis=1)
+        RE = na.sum(RE * self.unit_vectors[2], axis=1)
+        dist = na.minimum(LE, RE)
+        ind = na.argsort(dist)
+        pbar = get_pbar("Ray casting ", total_cells)
+        total_cells = 0
+        tfp = TransferFunctionProxy(self.transfer_function)
+        tfp.ns = self.sub_samples
+        for i, b in enumerate(self.bricks[ind]):
+            pos = b.cast_plane(tfp, self.vector_plane)
+            total_cells += na.prod(b.my_data.shape)
+            pbar.update(total_cells)
+        pbar.finish()
+        if finalize: self._finalize()
+
+    def _finalize(self):
+        #im = self._mpi_catdict(dict(image=self.image)).pop('image')
+        im, f = self._mpi_catrgb((self.image, self.resolution))
+        self.image = im
+
+    def dump_image(self, prefix):
+        fn = "%s.h5" % (self._get_filename(prefix))
+        mylog.info("Saving to %s", fn)
+        f = h5py.File(fn, "w")
+        f.create_dataset("/image", data=self.image)
+        f.close()
+
+    def load_bricks(self, fn):
+        self.bricks = import_partitioned_grids(fn)
+
+    def save_bricks(self, fn):
+        # This will need to be modified for parallel
+        export_partitioned_grids(self.bricks, fn)
+
+    def partition_grids(self):
+        log_field = (self.fields[0] in self.pf.field_info and 
+                     self.pf.field_info[self.fields[0]].take_log)
+        self._brick_collection._partition_local_grids(self.fields, log_field)
+        # UNCOMMENT FOR PARALLELISM
+        #self._brick_collection._collect_bricks(self.source)
+        self.bricks = self._brick_collection.bricks
+
+    def _construct_vector_array(self):
+        rx = self.resolution[0] * self.res_fac[0]
+        ry = self.resolution[1] * self.res_fac[1]
+        # We should move away from pre-generation of vectors like this and into
+        # the usage of on-the-fly generation in the VolumeIntegrator module
+        self.image = na.zeros((rx,ry,4), dtype='float64', order='F')
+        # We might have a different width and back_center
+        bl = self.source.box_lengths
+        px = na.linspace(-bl[0]/2.0, bl[0]/2.0, rx)[:,None]
+        py = na.linspace(-bl[1]/2.0, bl[1]/2.0, ry)[None,:]
+        inv_mat = self.source._inv_mat
+        bc = self.source.origin + 0.5*self.source.box_vectors[0] \
+                                + 0.5*self.source.box_vectors[1]
+        vectors = na.zeros((rx, ry, 3),
+                            dtype='float64', order='F')
+        vectors[:,:,0] = inv_mat[0,0]*px + inv_mat[0,1]*py + bc[0]
+        vectors[:,:,1] = inv_mat[1,0]*px + inv_mat[1,1]*py + bc[1]
+        vectors[:,:,2] = inv_mat[2,0]*px + inv_mat[2,1]*py + bc[2]
+        bounds = (px.min(), px.max(), py.min(), py.max())
+        self.vector_plane = VectorPlane(vectors, self.box_vectors[2],
+                                    bc, bounds, self.image,
+                                    self.source._x_vec, self.source._y_vec)
+        self.vp_bounds = bounds
+        self.vectors = vectors
 
-    return partitioned_grids[ind], image, vectors, norm_vec, pos
+data_object_registry["volume_rendering"] = VolumeRendering

Modified: trunk/yt/funcs.py
==============================================================================
--- trunk/yt/funcs.py	(original)
+++ trunk/yt/funcs.py	Wed Mar 17 11:33:12 2010
@@ -28,6 +28,7 @@
 import progressbar as pb
 from math import floor, ceil
 from yt.logger import ytLogger as mylog
+from yt.exceptions import *
 
 # Some compatibility functions.  In the long run, these *should* disappear as
 # we move toward newer python versions.  Most were implemented to get things

Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Wed Mar 17 11:33:12 2010
@@ -146,7 +146,7 @@
             self.pf = pf
             self.hierarchy = pf.hierarchy
         self.hierarchy.objects.append(weakref.proxy(self))
-        mylog.debug("Appending object to %s", self.pf)
+        mylog.debug("Appending object to %s (type: %s)", self.pf, type(self))
         if fields == None: fields = []
         self.fields = ensure_list(fields)[:]
         self.data = {}
@@ -1150,9 +1150,10 @@
 
     def _get_point_indices(self, grid):
         if self._pixelmask.max() == 0: return []
-        k = amr_utils.PointsInVolume(self._coord, self._pixelmask,
-                                     grid.LeftEdge, grid.RightEdge,
-                                     grid.child_mask, just_one(grid['dx']))
+        k = amr_utils.planar_points_in_volume(
+                self._coord, self._pixelmask,
+                grid.LeftEdge, grid.RightEdge,
+                grid.child_mask, just_one(grid['dx']))
         return k
 
     def _gen_node_name(self):
@@ -2011,6 +2012,68 @@
         """
         return math.pi * (self._radius)**2. * self._height * pf[unit]**3
 
+class AMRInclinedBox(AMR3DData):
+    _type_name="inclined_box"
+    _con_args = ('origin','box_vectors')
+
+    def __init__(self, origin, box_vectors, fields=None,
+                 pf=None, **kwargs):
+        """
+        A rectangular prism with arbitrary alignment to the computational
+        domain.  *origin* is the origin of the box, while *box_vectors* is an
+        array of ordering [ax, ijk] that describes the three vectors that
+        describe the box.  No checks are done to ensure that the box satisfies
+        a right-hand rule, but if it doesn't, behavior is undefined.
+        """
+        self.origin = na.array(origin)
+        self.box_vectors = na.array(box_vectors, dtype='float64')
+        self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
+        center = origin + 0.5*self.box_vectors.sum(axis=1)
+        AMR3DData.__init__(self, center, fields, pf, **kwargs)
+        self._setup_rotation_parameters()
+        self._refresh_data()
+
+    def _setup_rotation_parameters(self):
+        xv = self.box_vectors[0,:]
+        yv = self.box_vectors[1,:]
+        zv = self.box_vectors[2,:]
+        self._x_vec = xv / na.sqrt(na.dot(xv, xv))
+        self._y_vec = yv / na.sqrt(na.dot(yv, yv))
+        self._z_vec = zv / na.sqrt(na.dot(zv, zv))
+        self._rot_mat = na.array([self._x_vec,self._y_vec,self._z_vec])
+        self._inv_mat = na.linalg.pinv(self._rot_mat)
+
+    def _get_list_of_grids(self):
+        if self._grids is not None: return
+        GLE = self.pf.h.grid_left_edge
+        GRE = self.pf.h.grid_right_edge
+        goodI = amr_utils.find_grids_in_inclined_box(
+                    self.box_vectors, self.center, GLE, GRE)
+        cgrids = self.pf.h.grids[goodI.astype('bool')]
+        grids = []
+        for i,grid in enumerate(cgrids):
+            v = amr_utils.grid_points_in_volume(self.box_lengths, self.origin,
+                        self._rot_mat, grid.LeftEdge, grid.RightEdge, grid.dds,
+                        grid.child_mask, 1)
+            if v: grids.append(grid)
+        self._grids = na.array(grids, dtype='object')
+            
+
+    def _is_fully_enclosed(self, grid):
+        # This should be written at some point.
+        # We'd rotate all eight corners into the space of the box, then check to
+        # see if all are enclosed.
+        return False
+
+    def _get_cut_mask(self, grid):
+        if self._is_fully_enclosed(grid):
+            return True
+        pm = na.zeros(grid.ActiveDimensions, dtype='int32')
+        amr_utils.grid_points_in_volume(self.box_lengths, self.origin,
+                    self._rot_mat, grid.LeftEdge, grid.RightEdge, grid.dds, pm, 0)
+        return pm
+        
+
 class AMRRegionBase(AMR3DData):
     """
     AMRRegions are rectangular prisms of data.
@@ -2189,7 +2252,7 @@
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         if radius < self.hierarchy.get_smallest_dx():
-            raise SyntaxError("Your radius is smaller than your finest cell!")
+            raise YTSphereTooSmall(pf, radius, self.hierarchy.get_smallest_dx())
         self.set_field_parameter('radius',radius)
         self.radius = radius
         self.DW = self.pf["DomainRightEdge"] - self.pf["DomainLeftEdge"]

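Constructing the new data object looks like this (a sketch with hypothetical
vectors; inclined_box is the constructor registered on the hierarchy via
_type_name, following the usual yt convention):

    import numpy as na

    origin = na.array([0.25, 0.25, 0.25])
    # Rows are the three edge vectors of the box, in [ax, ijk] ordering.
    box_vectors = na.array([[ 0.4, 0.1, 0.0],
                            [-0.1, 0.4, 0.0],
                            [ 0.0, 0.0, 0.3]])
    box = pf.h.inclined_box(origin, box_vectors)
    print box["Density"].size
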
Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Wed Mar 17 11:33:12 2010
@@ -436,16 +436,24 @@
 
     def get_vertex_centered_data(self, field, smoothed=True):
         cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-        # Bounds should be cell-centered
-        bds = na.array(zip(cg.left_edge+cg.dds/2.0, cg.right_edge-cg.dds/2.0)).ravel()
-        interp = TrilinearFieldInterpolator(na.log10(cg[field]), bds, ['x','y','z'])
-        ad = self.ActiveDimensions + 1
-        x,y,z = na.mgrid[self.LeftEdge[0]:self.RightEdge[0]:ad[0]*1j,
-                         self.LeftEdge[1]:self.RightEdge[1]:ad[1]*1j,
-                         self.LeftEdge[2]:self.RightEdge[2]:ad[2]*1j]
-        dd = {'x':x,'y':y,'z':z}
-        scalars = 10**interp(dict(x=x,y=y,z=z))
-        return scalars
+        # We have two extra zones in every direction
+        if field in self.pf.field_info and self.pf.field_info[field].take_log:
+            cf = na.log10(cg[field])
+        else:
+            cf = cg[field]
+        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+        na.add(new_field, cf[1: ,1: ,1: ], new_field)
+        na.add(new_field, cf[:-1,1: ,1: ], new_field)
+        na.add(new_field, cf[1: ,:-1,1: ], new_field)
+        na.add(new_field, cf[1: ,1: ,:-1], new_field)
+        na.add(new_field, cf[:-1,1: ,:-1], new_field)
+        na.add(new_field, cf[1: ,:-1,:-1], new_field)
+        na.add(new_field, cf[:-1,:-1,1: ], new_field)
+        na.add(new_field, cf[:-1,:-1,:-1], new_field)
+        na.multiply(new_field, 0.125, new_field)
+        if field in self.pf.field_info and self.pf.field_info[field].take_log:
+            na.power(10.0, new_field, new_field)
+        return new_field
 
 class EnzoGrid(AMRGridPatch):
     """
@@ -578,3 +586,86 @@
     def __repr__(self):
         return "OrionGrid_%04i" % (self.id)
 
+class ProtoGadgetGrid(object):
+    def __init__(self, level, left_edge, right_edge, particles, parent = None):
+        # generate the left/right edge from the octant and the level argument
+        self.level = level
+        self.left_edge = left_edge
+        self.right_edge = right_edge
+        self.particles = particles
+        self.parent = parent # We break convention here -- this is a protogrid
+        self.children = []
+
+    def split(self):
+        W = 0.5 * (self.right_edge - self.left_edge) # half-width of this grid
+        keep = na.ones(self.particles.shape[1], dtype='bool')
+        for i in xrange(2):
+            for j in xrange(2):
+                for k in xrange(2):
+                    LE = self.left_edge.copy()
+                    LE += na.array([i, j, k], dtype='float64') * W
+                    RE = LE + W
+                    pi = self.select_particles(LE, RE)
+                    keep &= (~pi)
+                    g = ProtoGadgetGrid(self.level + 1, LE, RE,
+                                        self.particles[:,pi], self)
+                    self.children.append(g)
+                    yield g
+        self.particles = self.particles[:,keep]
+
+    def select_particles(self, LE, RE):
+        pi = na.all( (self.particles >= LE[:,None])
+                   & (self.particles <  RE[:,None]), axis=0)
+        return pi
+
+    def refine(self, npart = 128):
+        gs = [self]
+        if self.particles.shape[1] < npart: return gs
+        for grid in self.split():
+            gs += grid.refine(npart)
+        return gs
+
+class GadgetGrid(AMRGridPatch):
+
+    _id_offset = 0
+
+    def __init__(self, id, hierarchy, proto_grid):
+        AMRGridPatch.__init__(self, id, hierarchy = hierarchy)
+        self.Children = []
+        if proto_grid.parent is None:
+            self.Parent = None
+        else:
+            self.Parent = proto_grid.parent.real_grid
+            self.Parent.Children.append(self)
+        self.Level = proto_grid.level
+        self.LeftEdge = proto_grid.left_edge
+        self.RightEdge = proto_grid.right_edge
+        self.NumberOfParticles = proto_grid.particles.shape[1]
+        self._storage = {}
+        self._storage['particle_position_x'] = proto_grid.particles[0,:]
+        self._storage['particle_position_y'] = proto_grid.particles[1,:]
+        self._storage['particle_position_z'] = proto_grid.particles[2,:]
+        # Something should be done here for the volume change as you go down
+        # the hierarchy...
+        self._storage['particle_mass'] = na.ones(self.NumberOfParticles,
+                                           dtype='float64') * (8 ** self.Level)
+        # Our dx is a bit fluid here, so we defer
+        dims = self.pf["TopGridDimensions"]
+        # Hard code to refineby 2
+        dds = 1.0 / (dims * 2**self.Level)
+        ad = na.rint((self.RightEdge - self.LeftEdge) / dds)
+        self.ActiveDimensions = ad.astype('int64')
+
+    def __repr__(self):
+        return "GadgetGrid_%04i" % (self.id)
+
+class ChomboGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id"]
+    def __init__(self, id, hierarchy, level = -1):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level

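A note on `ProtoGadgetGrid.refine` above: it recursively splits any octant holding at least `npart` particles and returns the flattened list of proto-grids that `GadgetHierarchy` later promotes to real grids. A hypothetical standalone usage, with random positions in the unit cube (the 3 x N layout matches `select_particles`):

    import numpy as na

    particles = na.random.random((3, 10000))  # 3 x N positions in [0, 1)^3
    root = ProtoGadgetGrid(0, na.zeros(3, dtype='float64'),
                           na.ones(3, dtype='float64'), particles)
    proto_grids = root.refine(npart = 128)    # split octants with >= 128 particles
    print len(proto_grids), max(g.level for g in proto_grids)
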
Added: trunk/yt/lagos/ChomboFields.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/ChomboFields.py	Wed Mar 17 11:33:12 2010
@@ -0,0 +1,105 @@
+"""
+Chombo-specific fields
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: UC Berkeley
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from UniversalFields import *
+class ChomboFieldContainer(CodeFieldInfoContainer):
+    _shared_state = {}
+    _field_list = {}
+ChomboFieldInfo = ChomboFieldContainer()
+add_chombo_field = ChomboFieldInfo.add_field
+
+add_field = add_chombo_field
+
+add_field("density", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("density")],
+          units=r"\rm{g}/\rm{cm}^3")
+
+ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_field("X-momentum", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("X-Momentum")],
+          units=r"",display_name=r"B_x")
+ChomboFieldInfo["X-momentum"]._projected_units=r""
+
+add_field("Y-momentum", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("Y-Momentum")],
+          units=r"",display_name=r"B_y")
+ChomboFieldInfo["Y-momentum"]._projected_units=r""
+
+add_field("Z-momentum", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("Z-Momentum")],
+          units=r"",display_name=r"B_z")
+ChomboFieldInfo["Z-momentum"]._projected_units=r""
+
+add_field("X-magnfield", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("X-Magnfield")],
+          units=r"",display_name=r"B_x")
+ChomboFieldInfo["X-magnfield"]._projected_units=r""
+
+add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("Y-Magnfield")],
+          units=r"",display_name=r"B_y")
+ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+
+add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("Z-Magnfield")],
+          units=r"",display_name=r"B_z")
+ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+
+def _MagneticEnergy(field,data):
+    return (data["X-magnfield"]**2 +
+            data["Y-magnfield"]**2 +
+            data["Z-magnfield"]**2)/2.
+add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
+          units=r"",display_name=r"B^2/8\pi")
+ChomboFieldInfo["MagneticEnergy"]._projected_units=r""
+
+def _xVelocity(field, data):
+    """generate x-velocity from x-momentum and density
+
+    """
+    return data["X-momentum"]/data["density"]
+add_field("x-velocity",function=_xVelocity, take_log=False,
+          units=r'\rm{cm}/\rm{s}')
+
+def _yVelocity(field,data):
+    """generate y-velocity from y-momentum and density
+
+    """
+    return data["Y-momentum"]/data["density"]
+add_field("y-velocity",function=_yVelocity, take_log=False,
+          units=r'\rm{cm}/\rm{s}')
+
+def _zVelocity(field,data):
+    """generate z-velocity from z-momentum and density
+
+    """
+    return data["Z-momentum"]/data["density"]
+add_field("z-velocity",function=_zVelocity, take_log=False,
+          units=r'\rm{cm}/\rm{s}')
+    

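All of the entries above follow the same `add_field` pattern, so extending this file is mechanical. As an illustration only (not part of this commit), a hypothetical derived field built on the velocities just defined would look like:

    def _Speed(field, data):
        """velocity magnitude from the three velocity components"""
        return na.sqrt(data["x-velocity"]**2 +
                       data["y-velocity"]**2 +
                       data["z-velocity"]**2)
    add_field("Speed", function=_Speed, take_log=False,
              units=r'\rm{cm}/\rm{s}')
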
Modified: trunk/yt/lagos/DataReadingFuncs.py
==============================================================================
--- trunk/yt/lagos/DataReadingFuncs.py	(original)
+++ trunk/yt/lagos/DataReadingFuncs.py	Wed Mar 17 11:33:12 2010
@@ -78,7 +78,7 @@
     def _read_exception(self):
         return None
 
-class IOHandlerHDF4(BaseIOHandler):
+class IOHandlerEnzoHDF4(BaseIOHandler):
 
     _data_style = "enzo_hdf4"
 
@@ -122,7 +122,7 @@
     def _read_exception(self):
         return SD.HDF4Error
 
-class IOHandlerHDF4_2D(IOHandlerHDF4):
+class IOHandlerEnzoHDF4_2D(IOHandlerEnzoHDF4):
 
     _data_style = "enzo_hdf4_2d"
 
@@ -137,7 +137,7 @@
     def modify(self, field):
         return field
 
-class IOHandlerHDF5(BaseIOHandler):
+class IOHandlerEnzoHDF5(BaseIOHandler):
 
     _data_style = "enzo_hdf5"
     _particle_reader = True
@@ -424,7 +424,52 @@
                         (grid.id, field))
         return t
 
+class IOHandlerGadget(BaseIOHandler):
+    _data_style = 'gadget_binary'
+    def _read_data_set(self, grid, field):
+        return grid._storage[field]
+
 #
-# BoxLib/Orion data readers follow
+# Chombo readers
 #
+
+class IOHandlerChomboHDF5(BaseIOHandler):
+    _data_style = "chombo_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def _field_dict(self,fhandle):
+        ncomp = int(fhandle['/'].attrs['num_components'])
+        # The trailing attributes are ('component_N', field_name) pairs.
+        names, fields = zip(*fhandle['/'].attrs.listitems()[-ncomp:])
+        indices = [int(re.match('component_(\d+)',n).groups()[0]) for n in names]
+        return dict(zip(fields, indices))
+        
+    def _read_field_names(self,grid):
+        fhandle = h5py.File(grid.filename,'r')
+        ncomp = int(fhandle['/'].attrs['num_components'])
+
+        return [c[1] for c in fhandle['/'].attrs.listitems()[-ncomp:]]
     
+    def _read_data_set(self,grid,field):
+        fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+
+        field_dict = self._field_dict(fhandle)
+        lstring = 'level_%i' % grid.Level
+        lev = fhandle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+
+        return data.reshape(dims, order='F')
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        return self._read_data_set(grid,field)[sl]
+

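`IOHandlerChomboHDF5._read_data_set` above assumes each level's 'data:datatype=0' dataset is one flat array in which a grid stores all of component 0, then component 1, and so on, each block `boxsize` cells long in Fortran order. A toy illustration of that indexing with made-up sizes:

    import numpy as na

    dims = na.array([4, 4, 4])        # ActiveDimensions of one grid
    boxsize = dims.prod()             # cells per component per grid
    grid_offset = 0                   # this grid's offset into the flat array
    component = 2                     # index looked up via _field_dict
    flat = na.arange(3 * boxsize, dtype='float64')  # stand-in for lev[_data_string]
    start = grid_offset + component * boxsize
    data = flat[start:start + boxsize].reshape(dims, order='F')
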
Modified: trunk/yt/lagos/EnzoFields.py
==============================================================================
--- trunk/yt/lagos/EnzoFields.py	(original)
+++ trunk/yt/lagos/EnzoFields.py	Wed Mar 17 11:33:12 2010
@@ -246,21 +246,6 @@
 add_field("particle_density", function=_pdensity,
           validators=[ValidateSpatial(0)], convert_function=_convertDensity)
 
-def _pdensity_pyx(field, data):
-    blank = na.zeros(data.ActiveDimensions, dtype='float32')
-    if data.NumberOfParticles == 0: return blank
-    CICDeposit_3(data["particle_position_x"].astype(na.float64),
-                 data["particle_position_y"].astype(na.float64),
-                 data["particle_position_z"].astype(na.float64),
-                 data["particle_mass"].astype(na.float32),
-                 na.int64(data.NumberOfParticles),
-                 blank, na.array(data.LeftEdge).astype(na.float64),
-                 na.array(data.ActiveDimensions).astype(na.int32),
-                 na.float64(data['dx']))
-    return blank
-add_field("particle_density_pyx", function=_pdensity_pyx,
-          validators=[ValidateSpatial(0)], convert_function=_convertDensity)
-
 def _spdensity_pyx(field, data):
     blank = na.zeros(data.ActiveDimensions, dtype='float32')
     if data.NumberOfParticles == 0: return blank

Modified: trunk/yt/lagos/FieldInfoContainer.py
==============================================================================
--- trunk/yt/lagos/FieldInfoContainer.py	(original)
+++ trunk/yt/lagos/FieldInfoContainer.py	Wed Mar 17 11:33:12 2010
@@ -114,6 +114,15 @@
 OrionFieldInfo = OrionFieldContainer()
 add_orion_field = OrionFieldInfo.add_field
 
+class GadgetFieldContainer(CodeFieldInfoContainer):
+    """
+    This is a container for Gadget-specific fields.
+    """
+    _shared_state = {}
+    _field_list = {}
+GadgetFieldInfo = GadgetFieldContainer()
+add_gadget_field = GadgetFieldInfo.add_field
+
 class ValidationException(Exception):
     pass
 

Modified: trunk/yt/lagos/HelperFunctions.py
==============================================================================
--- trunk/yt/lagos/HelperFunctions.py	(original)
+++ trunk/yt/lagos/HelperFunctions.py	Wed Mar 17 11:33:12 2010
@@ -27,8 +27,9 @@
 from yt.lagos import *
 
 class UnilinearFieldInterpolator:
-    def __init__(self, table, boundaries, field_names):
+    def __init__(self, table, boundaries, field_names, truncate=False):
         self.table = table.astype('float64')
+        self.truncate = truncate
         x0, x1 = boundaries
         self.x_name = field_names
         self.x_bins = na.linspace(x0, x1, table.shape[0]).astype('float64')
@@ -39,14 +40,17 @@
 
         x_i = (na.digitize(x_vals, self.x_bins) - 1).astype('int32')
         if na.any((x_i == -1) | (x_i == len(self.x_bins)-1)):
-            mylog.error("Sorry, but your values are outside" + \
-                        " the table!  Dunno what to do, so dying.")
-            mylog.error("Error was in: %s", data_object)
-            raise ValueError
+            if not self.truncate:
+                mylog.error("Sorry, but your values are outside" + \
+                            " the table!  Dunno what to do, so dying.")
+                mylog.error("Error was in: %s", data_object)
+                raise ValueError
+            else:
+                x_i = na.minimum(na.maximum(x_i,0), len(self.x_bins)-2)
 
         my_vals = na.zeros(x_vals.shape, dtype='float64')
         amr_utils.UnilinearlyInterpolate(self.table, x_vals, self.x_bins, x_i, my_vals)
-        return my_vals
+        return my_vals.reshape(orig_shape)
 
 class BilinearFieldInterpolator:
     def __init__(self, table, boundaries, field_names, truncate=False):

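The new `truncate` branch above clamps out-of-range inputs into the outermost bins instead of raising. A small standalone demonstration of the clamp, using the same `na.digitize` convention:

    import numpy as na

    x_bins = na.linspace(0.0, 1.0, 11)
    x_vals = na.array([-0.5, 0.25, 2.0])
    x_i = (na.digitize(x_vals, x_bins) - 1).astype('int32')  # -> [-1, 2, 10]
    x_i = na.minimum(na.maximum(x_i, 0), len(x_bins) - 2)    # -> [ 0, 2,  9]
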
Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Wed Mar 17 11:33:12 2010
@@ -496,6 +496,7 @@
             second_grid.Level = first_grid.Level
         self.grid_levels[sgi] = second_grid.Level
 
+    _bn = "%s.cpu%%04i"
     def _parse_binary_hierarchy(self):
         mylog.info("Getting the binary hierarchy")
         try:
@@ -513,7 +514,7 @@
         self.filenames = []
         grids = [self.grid(gi+1, self) for gi in xrange(self.num_grids)]
         giter = izip(grids, levels, procs, parents)
-        bn = "%s.cpu%%04i" % (self.pf)
+        bn = self._bn % (self.pf)
         pmap = [(bn % P,) for P in xrange(procs.max()+1)]
         for grid,L,P,Pid in giter:
             grid.Level = L
@@ -1174,3 +1175,153 @@
         self.ngrids = ngrids
         self.grids = []
     
+
+class GadgetHierarchy(AMRHierarchy):
+
+    grid = GadgetGrid
+
+    def __init__(self, pf, data_style):
+        self.directory = pf.fullpath
+        self.data_style = data_style
+        AMRHierarchy.__init__(self, pf, data_style)
+
+    def _count_grids(self):
+        # We actually construct our octree here, from the particles that
+        # were read in when the parameter file was instantiated.
+        LE = na.zeros(3, dtype='float64')
+        RE = na.ones(3, dtype='float64')
+        base_grid = ProtoGadgetGrid(0, LE, RE, self.pf.particles)
+        self.proto_grids = base_grid.refine(8)
+        self.num_grids = len(self.proto_grids)
+        self.max_level = max( (g.level for g in self.proto_grids) )
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _parse_hierarchy(self):
+        grids = []
+        # We need to fill in dims, LE, RE, level, count
+        dims, LE, RE, levels, counts = [], [], [], [], []
+        self.proto_grids.sort(key = lambda a: a.level)
+        for i, pg in enumerate(self.proto_grids):
+            g = self.grid(i, self, pg)
+            pg.real_grid = g
+            grids.append(g)
+            dims.append(g.ActiveDimensions)
+            LE.append(g.LeftEdge)
+            RE.append(g.RightEdge)
+            levels.append(g.Level)
+            counts.append(g.NumberOfParticles)
+        del self.proto_grids
+        self.grids = na.array(grids, dtype='object')
+        self.grid_dimensions[:] = na.array(dims, dtype='int64')
+        self.grid_left_edge[:] = na.array(LE, dtype='float64')
+        self.grid_right_edge[:] = na.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+
+    def _populate_grid_objects(self):
+        # Nothing to do here beyond setting up dx on each grid.
+        for g in self.grids: g._setup_dx()
+
+    def _detect_fields(self):
+        self.field_list = ['particle_position_%s' % ax for ax in 'xyz']
+
+    def _setup_unknown_fields(self):
+        pass
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+class ChomboHierarchy(AMRHierarchy):
+
+    grid = ChomboGrid
+    
+    def __init__(self,pf,data_style='chombo_hdf5'):
+        self.data_style = data_style
+        self.field_info = ChomboFieldContainer()
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename)
+
+        self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
+        self._levels = self._fhandle.listnames()[1:]
+        AMRHierarchy.__init__(self,pf,data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        ncomp = int(self._fhandle['/'].attrs['num_components'])
+        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+    
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._fhandle[lev]['Processors'].len()
+        
+    def _parse_hierarchy(self):
+        f = self._fhandle # shortcut
+        
+        # this relies on the first Group in the H5 file being
+        # 'Chombo_global'
+        levels = f.listnames()[1:]
+        self.grids = []
+        i = 0
+        for lev in levels:
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            boxes = f[lev]['boxes'].value
+            dx = f[lev].attrs['dx']
+            for level_id, box in enumerate(boxes):
+                self.grids.append(self.grid(len(self.grids),self,level=level_number))
+                self.grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*na.array([box['lo_i'],
+                                                      box['lo_j'],
+                                                      box['lo_k']],
+                                                     dtype=self.float_type)
+                self.grid_right_edge[i] = dx*(na.array([box['hi_i'],
+                                                        box['hi_j'],
+                                                        box['hi_k']],
+                                                       dtype=self.float_type) + 1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = na.array([box['hi_i']-box['lo_i'],
+                                                    box['hi_j']-box['lo_j'],
+                                                    box['hi_k']-box['lo_k']]) + 1
+                i += 1
+        self.grids = na.array(self.grids, dtype='object')
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_unknown_fields(self):
+        pass
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = na.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+

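`ChomboHierarchy._parse_hierarchy` above converts each box record's inclusive integer cell corners into physical edges using the level's dx. The arithmetic in isolation, with made-up numbers:

    import numpy as na

    dx = 0.25
    lo = na.array([0, 0, 0])   # box['lo_i'], box['lo_j'], box['lo_k']
    hi = na.array([7, 7, 3])   # box['hi_i'], box['hi_j'], box['hi_k']
    left_edge = dx * lo        # -> [ 0.,  0.,  0.]
    right_edge = dx * (hi + 1) # -> [ 2.,  2.,  1.]
    dimensions = hi - lo + 1   # -> [8, 8, 4] cells
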
Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Wed Mar 17 11:33:12 2010
@@ -608,3 +608,96 @@
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)
     return pf
+
+class GadgetStaticOutput(StaticOutput):
+    _hierarchy_class = GadgetHierarchy
+    _fieldinfo_class = GadgetFieldContainer
+    def __init__(self, filename, particles, dimensions = None):
+        StaticOutput.__init__(self, filename, 'gadget_binary')
+        self.particles = particles
+        if "InitialTime" not in self.parameters:
+            self.parameters["InitialTime"] = 0.0
+        self.parameters["CurrentTimeIdentifier"] = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        if dimensions is None:
+            dimensions = na.ones((3,), dtype='int64') * 32
+        self.parameters['TopGridDimensions'] = dimensions
+        self.parameters['DomainLeftEdge'] = na.zeros(3, dtype='float64')
+        self.parameters['DomainRightEdge'] = na.ones(3, dtype='float64')
+        self.parameters['TopGridRank'] = 3
+        self.parameters['RefineBy'] = 2
+        self.field_info = self._fieldinfo_class()
+        self.units["Density"] = 1.0
+
+    def _parse_parameter_file(self):
+        pass
+
+    def _set_units(self):
+        self.units = {'1':1.0, 'unitary':1.0, 'cm':1.0}
+        self.time_units = {}
+
+class ChomboStaticOutput(StaticOutput):
+    _hierarchy_class = ChomboHierarchy
+    _fieldinfo_class = ChomboFieldContainer
+    
+    def __init__(self,filename,data_style='chombo_hdf5'):
+        StaticOutput.__init__(self,filename,data_style)
+
+        self.field_info = self._fieldinfo_class()
+        # hardcoded for now
+        self.parameters["InitialTime"] = 0.0
+        
+    def _set_units(self):
+        """
+        Generates the conversion factors to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        seconds = 1 #self["Time"]
+        self.time_units['years'] = seconds / (365*3600*24.0)
+        self.time_units['days']  = seconds / (3600*24.0)
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _parse_parameter_file(self):
+        self.parameters["CurrentTimeIdentifier"] = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.parameters["DomainLeftEdge"] = na.array([0.,0.,0.])
+        self.parameters["DomainRightEdge"] = self.__calc_right_edge()
+        
+
+    def __calc_right_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        RE = dx0*((na.array(fileh['/level_0'].attrs['prob_domain']))[3:] + 1)
+        fileh.close()
+        return RE
+                   
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = fileh.listnames()[0] == 'Chombo_global'
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    

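`ChomboStaticOutput.__calc_right_edge` above relies on the level-0 'prob_domain' attribute holding the inclusive corners (lo_i, lo_j, lo_k, hi_i, hi_j, hi_k): the last three entries plus one, times dx, give the domain's right edge. With made-up values:

    import numpy as na

    prob_domain = na.array([0, 0, 0, 31, 31, 31])
    dx0 = 1.0 / 32
    right_edge = dx0 * (prob_domain[3:] + 1)  # -> [ 1.,  1.,  1.]
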
Modified: trunk/yt/lagos/ParallelTools.py
==============================================================================
--- trunk/yt/lagos/ParallelTools.py	(original)
+++ trunk/yt/lagos/ParallelTools.py	Wed Mar 17 11:33:12 2010
@@ -277,7 +277,36 @@
         reg = self.hierarchy.region_strict(self.center, LE, RE)
         return True, reg
 
-    def _partition_hierarchy_3d(self, padding=0.0):
+    def _partition_hierarchy_2d_inclined(self, unit_vectors, origin, widths,
+                                         box_vectors, resolution = (1.0, 1.0)):
+        if not self._distributed:
+            ib = self.hierarchy.inclined_box(origin, box_vectors)
+            return False, ib, resolution
+        # We presuppose that unit_vectors is already unitary.  If it's not,
+        # caveat emptor.
+        uv = na.array(unit_vectors)
+        inv_mat = na.linalg.pinv(uv)
+        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
+        mi = MPI.COMM_WORLD.rank
+        cx, cy = na.unravel_index(mi, cc)
+        resolution = (1.0/cc[0], 1.0/cc[1])
+        # We are rotating with respect to the *origin*, not the back center,
+        # so we go from 0 .. width.
+        px = na.mgrid[0.0:1.0:(cc[0]+1)*1j][cx] * widths[0]
+        py = na.mgrid[0.0:1.0:(cc[1]+1)*1j][cy] * widths[1]
+        nxo = inv_mat[0,0]*px + inv_mat[0,1]*py + origin[0]
+        nyo = inv_mat[1,0]*px + inv_mat[1,1]*py + origin[1]
+        nzo = inv_mat[2,0]*px + inv_mat[2,1]*py + origin[2]
+        nbox_vectors = na.array(
+                       [unit_vectors[0] * widths[0]/cc[0],
+                        unit_vectors[1] * widths[1]/cc[1],
+                        unit_vectors[2] * widths[2]],
+                        dtype='float64')
+        norigin = na.array([nxo, nyo, nzo])
+        box = self.hierarchy.inclined_box(norigin, nbox_vectors)
+        return True, box, resolution
+        
+    def _partition_hierarchy_3d(self, padding=0.0, rank_ratio = 1):
         LE, RE = self.pf["DomainLeftEdge"].copy(), self.pf["DomainRightEdge"].copy()
         if not self._distributed:
            return False, LE, RE, self.hierarchy.all_data()
@@ -295,8 +324,8 @@
             RE = root_grids[0].RightEdge
             return True, LE, RE, self.hierarchy.region(self.center, LE, RE)
 
-        cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 3)
-        mi = MPI.COMM_WORLD.rank
+        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
+        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
         cx, cy, cz = na.unravel_index(mi, cc)
         x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
         y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
@@ -311,6 +340,33 @@
 
         return False, LE, RE, self.hierarchy.region_strict(self.center, LE, RE)
 
+    def _partition_region_3d(self, left_edge, right_edge, padding=0.0,
+            rank_ratio = 1):
+        """
+        Given a region, it subdivides it into smaller regions for parallel
+        analysis.
+        """
+        LE, RE = left_edge[:], right_edge[:]
+        if not self._distributed:
+            return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
+        
+        cc = MPI.Compute_dims(MPI.COMM_WORLD.size / rank_ratio, 3)
+        mi = MPI.COMM_WORLD.rank % (MPI.COMM_WORLD.size / rank_ratio)
+        cx, cy, cz = na.unravel_index(mi, cc)
+        x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
+        y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
+        z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
+
+        LE = na.array([x[0], y[0], z[0]], dtype='float64')
+        RE = na.array([x[1], y[1], z[1]], dtype='float64')
+
+        if padding > 0:
+            return True, \
+                LE, RE, self.hierarchy.periodic_region(self.center, LE-padding,
+                    RE+padding)
+
+        return False, LE, RE, self.hierarchy.region(self.center, LE, RE)
+
     def _partition_hierarchy_3d_bisection_list(self):
         """
         Returns an array that is used to drive _partition_hierarchy_3d_bisection,
@@ -786,6 +842,30 @@
         return None
 
     @parallel_passthrough
+    def _mpi_catrgb(self, data):
+        self._barrier()
+        data, final = data
+        if MPI.COMM_WORLD.rank == 0:
+            cc = MPI.Compute_dims(MPI.COMM_WORLD.size, 2)
+            nsize = final[0]/cc[0], final[1]/cc[1]
+            new_image = na.zeros((final[0], final[1], 4), dtype='float64')
+            new_image[0:nsize[0],0:nsize[1],:] = data[:]
+            for i in range(1,MPI.COMM_WORLD.size):
+                cy, cx = na.unravel_index(i, cc)
+                mylog.debug("Receiving image from % into bits %s:%s, %s:%s",
+                    i, nsize[0]*cx,nsize[0]*(cx+1),
+                       nsize[1]*cy,nsize[1]*(cy+1))
+                buf = _recv_array(source=i, tag=0).reshape(
+                    (nsize[0],nsize[1],4))
+                new_image[nsize[0]*cy:nsize[0]*(cy+1),
+                          nsize[1]*cx:nsize[1]*(cx+1),:] = buf[:]
+            data = new_image
+        else:
+            _send_array(data.ravel(), dest=0, tag=0)
+        data = MPI.COMM_WORLD.bcast(data)
+        return (data, final)
+
+    @parallel_passthrough
     def _mpi_catdict(self, data):
         field_keys = data.keys()
         field_keys.sort()
@@ -1246,36 +1326,49 @@
     # Non-blocking stuff.
     ###
 
-    def _mpi_Irecv_long(self, data, source):
+    def _mpi_Irecv_long(self, data, source, tag=0):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, 0)
+        return MPI.COMM_WORLD.Irecv([data, MPI.LONG], source, tag)
 
-    def _mpi_Irecv_double(self, data, source):
+    def _mpi_Irecv_double(self, data, source, tag=0):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, 0)
+        return MPI.COMM_WORLD.Irecv([data, MPI.DOUBLE], source, tag)
 
-    def _mpi_Isend_long(self, data, dest):
+    def _mpi_Isend_long(self, data, dest, tag=0):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, 0)
+        return MPI.COMM_WORLD.Isend([data, MPI.LONG], dest, tag)
 
-    def _mpi_Isend_double(self, data, dest):
+    def _mpi_Isend_double(self, data, dest, tag=0):
         if not self._distributed: return -1
-        return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, 0)
+        return MPI.COMM_WORLD.Isend([data, MPI.DOUBLE], dest, tag)
 
     def _mpi_Request_Waitall(self, hooks):
         if not self._distributed: return
         MPI.Request.Waitall(hooks)
 
+    def _mpi_Request_Waititer(self, hooks):
+        if not self._distributed: return
+        for i in xrange(len(hooks)):
+            req = MPI.Request.Waitany(hooks)
+            yield req
+
+    def _mpi_Request_Testall(self, hooks):
+        """
+        This returns False if any of the request hooks are un-finished,
+        and True if they are all finished.
+        """
+        if not self._distributed: return True
+        return MPI.Request.Testall(hooks)
+
     ###
     # End non-blocking stuff.
     ###
 
     def _mpi_get_size(self):
-        if not self._distributed: return None
+        if not self._distributed: return 1
         return MPI.COMM_WORLD.size
 
     def _mpi_get_rank(self):
-        if not self._distributed: return None
+        if not self._distributed: return 0
         return MPI.COMM_WORLD.rank
 
     def _mpi_info_dict(self, info):

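Both `_partition_hierarchy_3d` and `_partition_region_3d` above use the same trick: factor the communicator size into a block grid with `MPI.Compute_dims`, turn the rank into a block index with `na.unravel_index`, and slice the domain with `mgrid`. A serial sketch of that arithmetic (no MPI; `cc` is hard-coded to what `Compute_dims(8, 3)` would return):

    import numpy as na

    cc, rank = (2, 2, 2), 5
    cx, cy, cz = na.unravel_index(rank, cc)
    LE, RE = na.zeros(3), na.ones(3)
    x = na.mgrid[LE[0]:RE[0]:(cc[0]+1)*1j][cx:cx+2]
    y = na.mgrid[LE[1]:RE[1]:(cc[1]+1)*1j][cy:cy+2]
    z = na.mgrid[LE[2]:RE[2]:(cc[2]+1)*1j][cz:cz+2]
    sub_LE = na.array([x[0], y[0], z[0]], dtype='float64')
    sub_RE = na.array([x[1], y[1], z[1]], dtype='float64')
    # rank 5 -> block (1, 0, 1): [0.5, 0.0, 0.5] to [1.0, 0.5, 1.0]
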
Modified: trunk/yt/lagos/UniversalFields.py
==============================================================================
--- trunk/yt/lagos/UniversalFields.py	(original)
+++ trunk/yt/lagos/UniversalFields.py	Wed Mar 17 11:33:12 2010
@@ -35,10 +35,7 @@
 from yt.funcs import *
 from FieldInfoContainer import *
 
-try:
-    import cic_deposit
-except ImportError:
-    pass
+from yt.amr_utils import CICDeposit_3
 
 mh = 1.67e-24 # g
 me = 9.11e-28 # g
@@ -101,13 +98,13 @@
                       ValidateSpatial(0)])
 
 def _GridIndices(field, data):
-    return na.ones(data["Density"].shape)*(data.id-data._id_offset)
+    return na.ones(data["Ones"].shape)*(data.id-data._id_offset)
 add_field("GridIndices", function=_GridIndices,
           validators=[ValidateGridType(),
                       ValidateSpatial(0)], take_log=False)
 
 def _OnesOverDx(field, data):
-    return na.ones(data["Density"].shape,
+    return na.ones(data["Ones"].shape,
                    dtype=data["Density"].dtype)/data['dx']
 add_field("OnesOverDx", function=_OnesOverDx,
           display_field=False)
@@ -780,3 +777,22 @@
             (data["Density"]**(-0.5)))
 add_field("JeansMassMsun",function=_JeansMassMsun,
           units=r"\rm{M_{\odot}}")
+
+def _convertDensity(data):
+    return data.convert("Density")
+def _pdensity_pyx(field, data):
+    blank = na.zeros(data.ActiveDimensions, dtype='float32')
+    if data.NumberOfParticles == 0: return blank
+    CICDeposit_3(data["particle_position_x"].astype(na.float64),
+                 data["particle_position_y"].astype(na.float64),
+                 data["particle_position_z"].astype(na.float64),
+                 data["particle_mass"].astype(na.float32),
+                 na.int64(data.NumberOfParticles),
+                 blank, na.array(data.LeftEdge).astype(na.float64),
+                 na.array(data.ActiveDimensions).astype(na.int32),
+                 na.float64(data['dx']))
+    return blank
+add_field("particle_density_pyx", function=_pdensity_pyx,
+          validators=[ValidateSpatial(0)], convert_function=_convertDensity,
+          display_name=r"\mathrm{Particle}\/\mathrm{Density})")
+

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Wed Mar 17 11:33:12 2010
@@ -75,6 +75,7 @@
 from UniversalFields import *
 from EnzoFields import *
 from OrionFields import *
+from ChomboFields import *
 fieldInfo = EnzoFieldInfo
 
 # NOT the same as fieldInfo.add_field

Added: trunk/yt/parallel_tools/__init__.py
==============================================================================
--- (empty file)
+++ trunk/yt/parallel_tools/__init__.py	Wed Mar 17 11:33:12 2010
@@ -0,0 +1,27 @@
+"""
+Tools for parallelism.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from yt.lagos.ParallelTools import ParallelAnalysisInterface
+from distributed_object_collection import DistributedObjectCollection

Added: trunk/yt/parallel_tools/distributed_object_collection.py
==============================================================================
--- (empty file)
+++ trunk/yt/parallel_tools/distributed_object_collection.py	Wed Mar 17 11:33:12 2010
@@ -0,0 +1,135 @@
+"""
+A simple distributed object mechanism, for storing array-heavy objects.
+Meant to be subclassed.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as na
+from yt.funcs import *
+from yt.parallel_tools import ParallelAnalysisInterface
+from itertools import izip
+
+class DistributedObjectCollection(ParallelAnalysisInterface):
+    valid = True
+
+    def _get_object_info(self):
+        pass
+
+    def _set_object_info(self, info_dict):
+        pass
+
+    def join_lists(self):
+        info_dict = self._get_object_info()
+        info_dict = self._mpi_catdict(info_dict)
+        self._set_object_info(info_dict)
+
+    def _collect_objects(self, desired_indices):
+        # We figure out which indices belong to which processor,
+        # then we pack them up, and we send a list to each processor.
+        request_count = []
+        owners = self._object_owners[desired_indices]
+        mylog.debug("Owner list: %s", na.unique1d(owners))
+        # Even if we have a million bricks, this should not take long.
+        s = self._mpi_get_size()
+        m = self._mpi_get_rank()
+        requests = dict( ( (i, []) for i in xrange(s) ) )
+        for i, p in izip(desired_indices, owners):
+            requests[p].append(i)
+        for p in sorted(requests):
+            requests[p] = na.array(requests[p], dtype='int64')
+            request_count.append(len(requests[p]))
+        size = len(request_count)
+        mylog.debug("Requesting: %s", request_count)
+        request_count = na.array(request_count, dtype='int64')
+        # Now we distribute our requests to all the processors.
+        # This is two-pass.  One to get the length of the arrays.  The second
+        # pass is to get the actual indices themselves.
+        request_count = self._mpi_joindict({m : request_count})
+        # Now we have our final array of requests, with arrangement
+        # (Nproc,Nproc).  First index corresponds to requesting proc, second to
+        # sending.  So [them,us] = 5 means we owe 5, whereas [us, them] means
+        # we are owed.
+        send_hooks = []
+        dsend_buffers, dsend_hooks = [], []
+        recv_hooks, recv_buffers = [], []
+        drecv_buffers, drecv_hooks = [], []
+        # We post our index-list and data receives from each processor.
+        mylog.debug("Posting data buffer receives")
+        proc_hooks = {}
+        for p, request_from in request_count.items():
+            if p == m: continue
+            size = request_from[m]
+            #if size == 0: continue
+            # We post receives of the grids we *asked* for.
+            # Note that indices into this are not necessarily processor ids.
+            # So we store.  This has to go before the appends or it's an
+            # off-by-one.
+            mylog.debug("Setting up index buffer of size %s for receive from %s",
+                        size, p)
+            proc_hooks[len(drecv_buffers)] = p
+            drecv_buffers.append(self._create_buffer(requests[p]))
+            drecv_hooks.append(self._mpi_Irecv_double(drecv_buffers[-1], p, 1))
+            recv_buffers.append(na.zeros(size, dtype='int64'))
+            # Our index list goes on 0, our buffer goes on 1.  We know how big
+            # the index list will be, now.
+            recv_hooks.append(self._mpi_Irecv_long(recv_buffers[-1], p, 0))
+        # Send our index lists into the waiting buffers
+        mylog.debug("Sending index lists")
+        for p, ind_list in requests.items():
+            if p == m: continue
+            if len(ind_list) == 0: continue
+            # Now, we actually send our index lists.
+            send_hooks.append(self._mpi_Isend_long(ind_list, p, 0))
+        # As each index list arrives, pack up and send the requested data.
+        mylog.debug("Sending data")
+        for i in self._mpi_Request_Waititer(recv_hooks):
+            # We get back the index, which here is identical to the processor
+            # number doing the send.  At this point, we can post our receives.
+            p = proc_hooks[i]
+            mylog.debug("Processing from %s", p)
+            ind_list = recv_buffers[i]
+            dsend_buffers.append(self._create_buffer(ind_list))
+            self._pack_buffer(ind_list, dsend_buffers[-1])
+            dsend_hooks.append(self._mpi_Isend_double(
+                dsend_buffers[-1], p, 1))
+        mylog.debug("Waiting on data receives: %s", len(drecv_hooks))
+        for i in self._mpi_Request_Waititer(drecv_hooks):
+            mylog.debug("Unpacking from %s", proc_hooks[i])
+            # Now we have to unpack our buffers
+            # Our key into this is actually the request for the processor
+            # number.
+            p = proc_hooks[i]
+            self._unpack_buffer(requests[p], drecv_buffers[i])
+        mylog.debug("Finalizing sends: %s", len(dsend_hooks))
+        for i in self._mpi_Request_Waititer(dsend_hooks):
+            continue
+
+    def _create_buffer(self, ind_list):
+        pass
+
+    def _pack_buffer(self, ind_list, my_buffer):
+        pass
+
+    def _unpack_buffer(self, ind_list, my_buffer):
+        pass
+

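The three buffer hooks at the bottom of DistributedObjectCollection are deliberately blank; subclasses fill them in. A minimal hypothetical subclass that distributes one float64 scalar per object might look like this (`ScalarCollection`, and the contents of `_values` and `_object_owners`, are invented for the sketch):

    import numpy as na

    class ScalarCollection(DistributedObjectCollection):
        # Assumed state: self._values holds the locally-owned scalars and
        # self._object_owners maps every global index to its owning rank.
        def _create_buffer(self, ind_list):
            # One float64 slot per requested index.
            return na.zeros(len(ind_list), dtype='float64')
        def _pack_buffer(self, ind_list, my_buffer):
            # Fill the outgoing buffer with the values a peer asked for.
            my_buffer[:] = self._values[ind_list]
        def _unpack_buffer(self, ind_list, my_buffer):
            # Store the values we requested back into our local array.
            self._values[ind_list] = my_buffer
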
Modified: trunk/yt/raven/PlotTypes.py
==============================================================================
--- trunk/yt/raven/PlotTypes.py	(original)
+++ trunk/yt/raven/PlotTypes.py	Wed Mar 17 11:33:12 2010
@@ -999,10 +999,14 @@
 _mpl98_notify = lambda im,cb: cb.update_bruteforce(im)
 _mpl9x_notify = lambda im,cb: cb.notify(im)
 
-# This next function hurts, because it relies on the fact that
-# we're only differentiating between 0.9[01] and 0.98.
+# This next function hurts, because it relies on the fact that we're
+# only differentiating between 0.9[01] and 0.98.  If the version happens
+# to be 1.0, or any other version string with only three characters, the
+# fallback below should catch it.
 
-_mpl_version = float(matplotlib.__version__[:4])
+try:
+    _mpl_version = float(matplotlib.__version__[:4])
+except ValueError:
+    _mpl_version = float(matplotlib.__version__[:3])
 
 if _mpl_version < 0.98:
     _prefix = '_mpl9x'

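A hedged aside on the version check above: if only the major and minor components matter, splitting on dots avoids the fixed-width slicing entirely and handles the same versions:

    _mpl_version = float('.'.join(matplotlib.__version__.split('.')[:2]))
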
Modified: trunk/yt/setup.py
==============================================================================
--- trunk/yt/setup.py	(original)
+++ trunk/yt/setup.py	Wed Mar 17 11:33:12 2010
@@ -9,6 +9,7 @@
     config.add_subpackage('fido')
     config.add_subpackage('reason')
     config.add_subpackage('extensions')
+    config.add_subpackage('parallel_tools')
     config.add_extension("amr_utils", 
         ["yt/amr_utils.c", "yt/_amr_utils/FixedInterpolator.c"], 
         include_dirs=["yt/_amr_utils/"],


