[Yt-svn] yt-commit r1754 - in trunk/yt: . _amr_utils extensions extensions/volume_rendering

mturk at wrangler.dreamhost.com
Thu Jun 24 08:38:43 PDT 2010


Author: mturk
Date: Thu Jun 24 08:38:42 2010
New Revision: 1754
URL: http://yt.enzotools.org/changeset/1754

Log:
Backporting from hg:

 * Fixed png_writer to produce the correct image shape for non-1.0 aspect
   ratios.
 * Added Octree and QuadTree, to be used later.
 * De-referenced pointers in the volume rendering interpolator, which sped it
   up slightly.
 * Enabled arbitrary directions for all vectors in the VectorPlane, for future
   use with non-orthographic projection.
 * Added non-functional PerspectiveCamera.  Needs more sophisticated clipping
   logic.
 * Added docstrings to TransferFunction.py.  One more needs to be added.



Added:
   trunk/yt/_amr_utils/Octree.pyx
   trunk/yt/_amr_utils/QuadTree.pyx
Modified:
   trunk/yt/_amr_utils/FixedInterpolator.c
   trunk/yt/_amr_utils/FixedInterpolator.h
   trunk/yt/_amr_utils/VolumeIntegrator.pyx
   trunk/yt/_amr_utils/png_writer.pyx
   trunk/yt/amr_utils.pyx
   trunk/yt/extensions/HierarchySubset.py
   trunk/yt/extensions/volume_rendering/TransferFunction.py
   trunk/yt/extensions/volume_rendering/__init__.py
   trunk/yt/extensions/volume_rendering/camera.py

Modified: trunk/yt/_amr_utils/FixedInterpolator.c
==============================================================================
--- trunk/yt/_amr_utils/FixedInterpolator.c	(original)
+++ trunk/yt/_amr_utils/FixedInterpolator.c	Thu Jun 24 08:38:42 2010
@@ -28,8 +28,9 @@
 
 #define VINDEX(A,B,C) data[((((A)+ci[0])*(ds[1]+1)+((B)+ci[1]))*(ds[2]+1)+ci[2]+(C))]
 //  (((C*ds[1])+B)*ds[0]+A)
+#define OINDEX(A,B,C) data[(A)*(ds[1]+1)*(ds[2]+1)+(B)*ds[2]+(B)+(C)]
 
-npy_float64 fast_interpolate(int *ds, int *ci, npy_float64 *dp,
+npy_float64 fast_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
                              npy_float64 *data)
 {
     int i;
@@ -48,7 +49,28 @@
     return dv;
 }
 
-npy_float64 trilinear_interpolate(int *ds, int *ci, npy_float64 *dp,
+npy_float64 offset_interpolate(int ds[3], npy_float64 dp[3], npy_float64 *data)
+{
+    int i;
+    npy_float64 dv, vz[4];
+
+    dv = 1.0 - dp[2];
+    vz[0] = dv*OINDEX(0,0,0) + dp[2]*OINDEX(0,0,1);
+    vz[1] = dv*OINDEX(0,1,0) + dp[2]*OINDEX(0,1,1);
+    vz[2] = dv*OINDEX(1,0,0) + dp[2]*OINDEX(1,0,1);
+    vz[3] = dv*OINDEX(1,1,0) + dp[2]*OINDEX(1,1,1);
+
+    dv = 1.0 - dp[1];
+    vz[0] = dv*vz[0] + dp[1]*vz[1];
+    vz[1] = dv*vz[2] + dp[1]*vz[3];
+
+    dv = 1.0 - dp[0];
+    vz[0] = dv*vz[0] + dp[0]*vz[1];
+
+    return vz[0];
+}
+
+npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
 				  npy_float64 *data)
 {
     /* dims is one less than the dimensions of the array */
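
Note on the new offset_interpolate: it performs the same z-then-y-then-x
trilinear blend as trilinear_interpolate, but expects the caller to have
already advanced the data pointer to the cell's first vertex, with OINDEX
striding through a vertex-centered array of shape (ds[0]+1, ds[1]+1, ds[2]+1).
A minimal NumPy sketch of the same blend (the Python helper below is purely
illustrative and not part of the committed C API):

    import numpy as np

    def offset_interpolate_py(ds, ci, dp, data):
        """Trilinear blend over one cell of a flattened vertex-centered array.

        ds   : cell dimensions of the brick (so there are ds[i]+1 vertices per axis)
        ci   : integer index of the cell's lower corner
        dp   : fractional position inside the cell, each component in [0, 1]
        data : flat float64 array with (ds[0]+1)*(ds[1]+1)*(ds[2]+1) entries
        """
        # The C routine receives `data` already offset to the cell corner; here
        # the offset is computed explicitly, mirroring OINDEX's strides.
        nyz = (ds[1] + 1) * (ds[2] + 1)
        nz = ds[2] + 1
        base = ci[0] * nyz + ci[1] * nz + ci[2]

        def corner(a, b, c):
            return data[base + a * nyz + b * nz + c]

        # Blend along z, then y, then x -- the same order as the C routine.
        dv = 1.0 - dp[2]
        vz = [dv * corner(a, b, 0) + dp[2] * corner(a, b, 1)
              for a in (0, 1) for b in (0, 1)]
        dv = 1.0 - dp[1]
        vy = [dv * vz[0] + dp[1] * vz[1], dv * vz[2] + dp[1] * vz[3]]
        dv = 1.0 - dp[0]
        return dv * vy[0] + dp[0] * vy[1]

    # Quick self-check on a 2x2x2-cell brick: the value at a cell center is the
    # average of its eight corner vertices (here 16.5).
    data = np.arange(27, dtype='float64')
    print(offset_interpolate_py((2, 2, 2), (1, 0, 1), (0.5, 0.5, 0.5), data))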

Modified: trunk/yt/_amr_utils/FixedInterpolator.h
==============================================================================
--- trunk/yt/_amr_utils/FixedInterpolator.h	(original)
+++ trunk/yt/_amr_utils/FixedInterpolator.h	Thu Jun 24 08:38:42 2010
@@ -36,6 +36,8 @@
 npy_float64 fast_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
                              npy_float64 *data);
 
+npy_float64 offset_interpolate(int ds[3], npy_float64 dp[3], npy_float64 *data);
+
 npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3],
 				  npy_float64 *data);
 

Added: trunk/yt/_amr_utils/Octree.pyx
==============================================================================
--- (empty file)
+++ trunk/yt/_amr_utils/Octree.pyx	Thu Jun 24 08:38:42 2010
@@ -0,0 +1,280 @@
+"""
+A refine-by-two AMR-specific octree
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+import numpy as np
+cimport numpy as np
+# Double up here for def'd functions
+cimport numpy as cnp
+cimport cython
+
+from stdlib cimport malloc, free, abs
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+cdef struct OctreeNode:
+    np.float64_t *val
+    np.float64_t weight_val
+    np.int64_t pos[3]
+    int level
+    int nvals
+    OctreeNode *children[2][2][2]
+
+cdef void OTN_add_value(OctreeNode *self,
+        np.float64_t *val, np.float64_t weight_val):
+    cdef int i
+    for i in range(self.nvals):
+        self.val[i] += val[i]
+    self.weight_val += weight_val
+
+cdef void OTN_refine(OctreeNode *self):
+    cdef int i, j, i1, j1
+    cdef np.int64_t npos[3]
+    cdef OctreeNode *node
+    for i in range(2):
+        npos[0] = self.pos[0] * 2 + i
+        for j in range(2):
+            npos[1] = self.pos[1] * 2 + j
+            # We have to be careful with allocation...
+            for k in range(2):
+                npos[2] = self.pos[2] * 2 + k
+                self.children[i][j][k] = OTN_initialize(
+                            npos,
+                            self.nvals, self.val, self.weight_val,
+                            self.level + 1)
+    for i in range(self.nvals): self.val[i] = 0.0
+    self.weight_val = 0.0
+
+cdef OctreeNode *OTN_initialize(np.int64_t pos[3], int nvals,
+                        np.float64_t *val, np.float64_t weight_val,
+                        int level):
+    cdef OctreeNode *node
+    cdef int i, j
+    node = <OctreeNode *> malloc(sizeof(OctreeNode))
+    node.pos[0] = pos[0]
+    node.pos[1] = pos[1]
+    node.pos[2] = pos[2]
+    node.nvals = nvals
+    node.val = <np.float64_t *> malloc(
+                nvals * sizeof(np.float64_t))
+    for i in range(nvals):
+        node.val[i] = val[i]
+    node.weight_val = weight_val
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                node.children[i][j][k] = NULL
+    node.level = level
+    return node
+
+cdef void OTN_free(OctreeNode *node):
+    cdef int i, j
+    for i in range(2):
+        for j in range(2):
+            for k in range(2):
+                if node.children[i][j][k] == NULL: continue
+                OTN_free(node.children[i][j][k])
+    free(node.val)
+    free(node)
+
+cdef class Octree:
+    cdef int nvals
+    cdef np.int64_t po2[80]
+    cdef OctreeNode ****root_nodes
+    cdef np.int64_t top_grid_dims[3]
+
+    def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
+                  int nvals):
+        cdef int i, j
+        cdef OctreeNode *node
+        cdef np.int64_t pos[3]
+        cdef np.float64_t *vals = <np.float64_t *> alloca(
+                sizeof(np.float64_t)*nvals)
+        cdef np.float64_t weight_val = 0.0
+        self.nvals = nvals
+        for i in range(nvals): vals[i] = 0.0
+
+        self.top_grid_dims[0] = top_grid_dims[0]
+        self.top_grid_dims[1] = top_grid_dims[1]
+        self.top_grid_dims[2] = top_grid_dims[2]
+
+        # This wouldn't be necessary if we did bitshifting...
+        for i in range(80):
+            self.po2[i] = 2**i
+        # Cython doesn't seem to like sizeof(OctreeNode ***)
+        self.root_nodes = <OctreeNode ****> \
+            malloc(sizeof(void*) * top_grid_dims[0])
+
+        # We initialize our root values to 0.0.
+        for i in range(top_grid_dims[0]):
+            pos[0] = i
+            self.root_nodes[i] = <OctreeNode ***> \
+                malloc(sizeof(OctreeNode **) * top_grid_dims[1])
+            for j in range(top_grid_dims[1]):
+                pos[1] = j
+                self.root_nodes[i][j] = <OctreeNode **> \
+                    malloc(sizeof(OctreeNode *) * top_grid_dims[2])
+                for k in range(top_grid_dims[2]):
+                    pos[2] = k
+                    self.root_nodes[i][j][k] = OTN_initialize(
+                        pos, nvals, vals, weight_val, 0)
+
+    cdef void add_to_position(self,
+                 int level, np.int64_t pos[3],
+                 np.float64_t *val,
+                 np.float64_t weight_val):
+        cdef int i, j
+        cdef OctreeNode *node
+        node = self.find_on_root_level(pos, level)
+        cdef np.int64_t fac
+        for L in range(level):
+            if node.children[0][0][0] == NULL:
+                OTN_refine(node)
+            # Maybe we should use bitwise operators?
+            fac = self.po2[level - L - 1]
+            i = (pos[0] >= fac*(2*node.pos[0]+1))
+            j = (pos[1] >= fac*(2*node.pos[1]+1))
+            k = (pos[2] >= fac*(2*node.pos[2]+1))
+            node = node.children[i][j][k]
+        OTN_add_value(node, val, weight_val)
+            
+    cdef OctreeNode *find_on_root_level(self, np.int64_t pos[3], int level):
+        # We need this because the root level won't just have four children
+        # So we find on the root level, then we traverse the tree.
+        cdef np.int64_t i, j
+        i = <np.int64_t> (pos[0] / self.po2[level])
+        j = <np.int64_t> (pos[1] / self.po2[level])
+        k = <np.int64_t> (pos[2] / self.po2[level])
+        return self.root_nodes[i][j][k]
+        
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_array_to_tree(self, int level,
+            np.ndarray[np.int64_t, ndim=1] pxs,
+            np.ndarray[np.int64_t, ndim=1] pys,
+            np.ndarray[np.int64_t, ndim=1] pzs,
+            np.ndarray[np.float64_t, ndim=2] pvals,
+            np.ndarray[np.float64_t, ndim=1] pweight_vals):
+        cdef int np = pxs.shape[0]
+        cdef int p
+        cdef cnp.float64_t *vals
+        cdef cnp.float64_t *data = <cnp.float64_t *> pvals.data
+        cdef cnp.int64_t pos[3]
+        for p in range(np):
+            vals = data + self.nvals*p
+            pos[0] = pxs[p]
+            pos[1] = pys[p]
+            pos[2] = pzs[p]
+            self.add_to_position(level, pos, vals, pweight_vals[p])
+
+    def add_grid_to_tree(self, int level,
+                         np.ndarray[np.int64_t, ndim=1] start_index,
+                         np.ndarray[np.float64_t, ndim=2] pvals,
+                         np.ndarray[np.float64_t, ndim=2] wvals,
+                         np.ndarray[np.int32_t, ndim=2] cm):
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def get_all_from_level(self, int level, int count_only = 0):
+        cdef int i, j
+        cdef int total = 0
+        vals = []
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                for k in range(self.top_grid_dims[2]):
+                    total += self.count_at_level(self.root_nodes[i][j][k], level)
+        if count_only: return total
+        # Allocate our array
+        cdef np.ndarray[np.int64_t, ndim=2] npos
+        cdef np.ndarray[np.float64_t, ndim=2] nvals
+        cdef np.ndarray[np.float64_t, ndim=1] nwvals
+        npos = np.zeros( (total, 3), dtype='int64')
+        nvals = np.zeros( (total, self.nvals), dtype='float64')
+        nwvals = np.zeros( total, dtype='float64')
+        cdef np.int64_t curpos = 0
+        cdef np.int64_t *pdata = <np.int64_t *> npos.data
+        cdef np.float64_t *vdata = <np.float64_t *> nvals.data
+        cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                for k in range(self.top_grid_dims[2]):
+                    curpos += self.fill_from_level(self.root_nodes[i][j][k],
+                        level, curpos, pdata, vdata, wdata)
+        return npos, nvals, nwvals
+
+    cdef int count_at_level(self, OctreeNode *node, int level):
+        cdef int i, j
+        # We only really return a non-zero, calculated value if we are at the
+        # level in question.
+        if node.level == level:
+            # We return 1 if there are no finer points at this level and zero
+            # if there are
+            return (node.children[0][0][0] == NULL)
+        if node.children[0][0][0] == NULL: return 0
+        cdef int count = 0
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    count += self.count_at_level(node.children[i][j][k], level)
+        return count
+
+    cdef int fill_from_level(self, OctreeNode *node, int level,
+                              np.int64_t curpos,
+                              np.int64_t *pdata,
+                              np.float64_t *vdata,
+                              np.float64_t *wdata):
+        cdef int i, j
+        if node.level == level:
+            if node.children[0][0][0] != NULL: return 0
+            for i in range(self.nvals):
+                vdata[self.nvals * curpos + i] = node.val[i]
+            wdata[curpos] = node.weight_val
+            pdata[curpos * 3] = node.pos[0]
+            pdata[curpos * 3 + 1] = node.pos[1]
+            pdata[curpos * 3 + 2] = node.pos[2]
+            return 1
+        if node.children[0][0][0] == NULL: return 0
+        cdef np.int64_t added = 0
+        for i in range(2):
+            for j in range(2):
+                for k in range(2):
+                    added += self.fill_from_level(node.children[i][j][k],
+                            level, curpos + added, pdata, vdata, wdata)
+        return added
+
+    def __dealloc__(self):
+        cdef int i, j
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                for k in range(self.top_grid_dims[2]):
+                    OTN_free(self.root_nodes[i][j][k])
+                free(self.root_nodes[i][j])
+            free(self.root_nodes[i])
+        free(self.root_nodes)
+

Added: trunk/yt/_amr_utils/QuadTree.pyx
==============================================================================
--- (empty file)
+++ trunk/yt/_amr_utils/QuadTree.pyx	Thu Jun 24 08:38:42 2010
@@ -0,0 +1,257 @@
+"""
+A refine-by-two AMR-specific quadtree
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+import numpy as np
+cimport numpy as np
+# Double up here for def'd functions
+cimport numpy as cnp
+cimport cython
+
+from stdlib cimport malloc, free, abs
+
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+cdef struct QuadTreeNode:
+    np.float64_t *val
+    np.float64_t weight_val
+    np.int64_t pos[2]
+    int level
+    int nvals
+    QuadTreeNode *children[2][2]
+
+cdef void QTN_add_value(QuadTreeNode *self,
+        np.float64_t *val, np.float64_t weight_val):
+    cdef int i
+    for i in range(self.nvals):
+        self.val[i] += val[i]
+    self.weight_val += weight_val
+
+cdef void QTN_refine(QuadTreeNode *self):
+    cdef int i, j, i1, j1
+    cdef np.int64_t npos[2]
+    cdef QuadTreeNode *node
+    for i in range(2):
+        npos[0] = self.pos[0] * 2 + i
+        for j in range(2):
+            npos[1] = self.pos[1] * 2 + j
+            # We have to be careful with allocation...
+            self.children[i][j] = QTN_initialize(
+                        npos,
+                        self.nvals, self.val, self.weight_val,
+                        self.level + 1)
+    for i in range(self.nvals): self.val[i] = 0.0
+    self.weight_val = 0.0
+
+cdef QuadTreeNode *QTN_initialize(np.int64_t pos[2], int nvals,
+                        np.float64_t *val, np.float64_t weight_val,
+                        int level):
+    cdef QuadTreeNode *node
+    cdef int i, j
+    node = <QuadTreeNode *> malloc(sizeof(QuadTreeNode))
+    node.pos[0] = pos[0]
+    node.pos[1] = pos[1]
+    node.nvals = nvals
+    node.val = <np.float64_t *> malloc(
+                nvals * sizeof(np.float64_t))
+    for i in range(nvals):
+        node.val[i] = val[i]
+    node.weight_val = weight_val
+    for i in range(2):
+        for j in range(2):
+            node.children[i][j] = NULL
+    node.level = level
+    return node
+
+cdef void QTN_free(QuadTreeNode *node):
+    cdef int i, j
+    for i in range(2):
+        for j in range(2):
+            if node.children[i][j] == NULL: continue
+            QTN_free(node.children[i][j])
+    free(node.val)
+    free(node)
+
+cdef class QuadTree:
+    cdef int nvals
+    cdef np.int64_t po2[80]
+    cdef QuadTreeNode ***root_nodes
+    cdef np.int64_t top_grid_dims[2]
+
+    def __cinit__(self, np.ndarray[np.int64_t, ndim=1] top_grid_dims,
+                  int nvals):
+        cdef int i, j
+        cdef QuadTreeNode *node
+        cdef np.int64_t pos[2]
+        cdef np.float64_t *vals = <np.float64_t *> alloca(
+                sizeof(np.float64_t)*nvals)
+        cdef np.float64_t weight_val = 0.0
+        self.nvals = nvals
+        for i in range(nvals): vals[i] = 0.0
+
+        self.top_grid_dims[0] = top_grid_dims[0]
+        self.top_grid_dims[1] = top_grid_dims[1]
+
+        # This wouldn't be necessary if we did bitshifting...
+        for i in range(80):
+            self.po2[i] = 2**i
+        self.root_nodes = <QuadTreeNode ***> \
+            malloc(sizeof(QuadTreeNode **) * top_grid_dims[0])
+
+        # We initialize our root values to 0.0.
+        for i in range(top_grid_dims[0]):
+            pos[0] = i
+            self.root_nodes[i] = <QuadTreeNode **> \
+                malloc(sizeof(QuadTreeNode *) * top_grid_dims[1])
+            for j in range(top_grid_dims[1]):
+                pos[1] = j
+                self.root_nodes[i][j] = QTN_initialize(
+                    pos, nvals, vals, weight_val, 0)
+
+    cdef void add_to_position(self,
+                 int level, np.int64_t pos[2],
+                 np.float64_t *val,
+                 np.float64_t weight_val):
+        cdef int i, j
+        cdef QuadTreeNode *node
+        node = self.find_on_root_level(pos, level)
+        cdef np.int64_t fac
+        for L in range(level):
+            if node.children[0][0] == NULL:
+                QTN_refine(node)
+            # Maybe we should use bitwise operators?
+            fac = self.po2[level - L - 1]
+            i = (pos[0] >= fac*(2*node.pos[0]+1))
+            j = (pos[1] >= fac*(2*node.pos[1]+1))
+            node = node.children[i][j]
+        QTN_add_value(node, val, weight_val)
+            
+    cdef QuadTreeNode *find_on_root_level(self, np.int64_t pos[2], int level):
+        # We need this because the root level won't just have four children
+        # So we find on the root level, then we traverse the tree.
+        cdef np.int64_t i, j
+        i = <np.int64_t> (pos[0] / self.po2[level])
+        j = <np.int64_t> (pos[1] / self.po2[level])
+        return self.root_nodes[i][j]
+        
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def add_array_to_tree(self, int level,
+            np.ndarray[np.int64_t, ndim=1] pxs,
+            np.ndarray[np.int64_t, ndim=1] pys,
+            np.ndarray[np.float64_t, ndim=2] pvals,
+            np.ndarray[np.float64_t, ndim=1] pweight_vals):
+        cdef int np = pxs.shape[0]
+        cdef int p
+        cdef cnp.float64_t *vals
+        cdef cnp.float64_t *data = <cnp.float64_t *> pvals.data
+        cdef cnp.int64_t pos[2]
+        for p in range(np):
+            vals = data + self.nvals*p
+            pos[0] = pxs[p]
+            pos[1] = pys[p]
+            self.add_to_position(level, pos, vals, pweight_vals[p])
+
+    def add_grid_to_tree(self, int level,
+                         np.ndarray[np.int64_t, ndim=1] start_index,
+                         np.ndarray[np.float64_t, ndim=2] pvals,
+                         np.ndarray[np.float64_t, ndim=2] wvals,
+                         np.ndarray[np.int32_t, ndim=2] cm):
+        pass
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def get_all_from_level(self, int level, int count_only = 0):
+        cdef int i, j
+        cdef int total = 0
+        vals = []
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                total += self.count_at_level(self.root_nodes[i][j], level)
+        if count_only: return total
+        # Allocate our array
+        cdef np.ndarray[np.int64_t, ndim=2] npos
+        cdef np.ndarray[np.float64_t, ndim=2] nvals
+        cdef np.ndarray[np.float64_t, ndim=1] nwvals
+        npos = np.zeros( (total, 2), dtype='int64')
+        nvals = np.zeros( (total, self.nvals), dtype='float64')
+        nwvals = np.zeros( total, dtype='float64')
+        cdef np.int64_t curpos = 0
+        cdef np.int64_t *pdata = <np.int64_t *> npos.data
+        cdef np.float64_t *vdata = <np.float64_t *> nvals.data
+        cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                curpos += self.fill_from_level(self.root_nodes[i][j],
+                    level, curpos, pdata, vdata, wdata)
+        return npos, nvals, nwvals
+
+    cdef int count_at_level(self, QuadTreeNode *node, int level):
+        cdef int i, j
+        # We only really return a non-zero, calculated value if we are at the
+        # level in question.
+        if node.level == level:
+            # We return 1 if there are no finer points at this level and zero
+            # if there are
+            return (node.children[0][0] == NULL)
+        if node.children[0][0] == NULL: return 0
+        cdef int count = 0
+        for i in range(2):
+            for j in range(2):
+                count += self.count_at_level(node.children[i][j], level)
+        return count
+
+    cdef int fill_from_level(self, QuadTreeNode *node, int level,
+                              np.int64_t curpos,
+                              np.int64_t *pdata,
+                              np.float64_t *vdata,
+                              np.float64_t *wdata):
+        cdef int i, j
+        if node.level == level:
+            if node.children[0][0] != NULL: return 0
+            for i in range(self.nvals):
+                vdata[self.nvals * curpos + i] = node.val[i]
+            wdata[curpos] = node.weight_val
+            pdata[curpos * 2] = node.pos[0]
+            pdata[curpos * 2 + 1] = node.pos[1]
+            return 1
+        if node.children[0][0] == NULL: return 0
+        cdef np.int64_t added = 0
+        for i in range(2):
+            for j in range(2):
+                added += self.fill_from_level(node.children[i][j],
+                        level, curpos + added, pdata, vdata, wdata)
+        return added
+
+    def __dealloc__(self):
+        cdef int i, j
+        for i in range(self.top_grid_dims[0]):
+            for j in range(self.top_grid_dims[1]):
+                QTN_free(self.root_nodes[i][j])
+            free(self.root_nodes[i])
+        free(self.root_nodes)
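
Since QuadTree.pyx is pulled into yt.amr_utils (see the amr_utils.pyx change
below), a rough usage sketch might look like the following once the extension
is rebuilt.  The import path, the sample values, and the choice of level are
assumptions of this sketch rather than something the commit itself exercises:

    import numpy as np
    from yt import amr_utils as au   # assumes the rebuilt extension module

    # A 4x4 root grid carrying two value fields per cell.
    top_dims = np.array([4, 4], dtype='int64')
    qt = au.QuadTree(top_dims, 2)

    # Deposit three level-1 cells; positions are in level-1 cell units,
    # i.e. 0..7 along each axis for a 4x4 root grid refined by two.
    pxs = np.array([0, 1, 6], dtype='int64')
    pys = np.array([0, 3, 6], dtype='int64')
    pvals = np.array([[1.0, 10.0],
                      [2.0, 20.0],
                      [3.0, 30.0]], dtype='float64')
    pweights = np.ones(3, dtype='float64')
    qt.add_array_to_tree(1, pxs, pys, pvals, pweights)

    # Count the level-1 leaves, then pull back their positions, values and weights.
    n_leaves = qt.get_all_from_level(1, count_only=1)
    npos, nvals, nwvals = qt.get_all_from_level(1)
    print(npos.shape, nvals.shape, nwvals.shape)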

Modified: trunk/yt/_amr_utils/VolumeIntegrator.pyx
==============================================================================
--- trunk/yt/_amr_utils/VolumeIntegrator.pyx	(original)
+++ trunk/yt/_amr_utils/VolumeIntegrator.pyx	Thu Jun 24 08:38:42 2010
@@ -63,10 +63,10 @@
     long int lrint(double x)
 
 cdef extern from "FixedInterpolator.h":
-    np.float64_t fast_interpolate(int *ds, int *ci, np.float64_t *dp,
+    np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
                                   np.float64_t *data)
-cdef extern from "FixedInterpolator.h":
-    np.float64_t trilinear_interpolate(int *ds, int *ci, np.float64_t *dp,
+    np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3], np.float64_t *data)
+    np.float64_t trilinear_interpolate(int ds[3], int ci[3], np.float64_t dp[3],
                                        np.float64_t *data)
     np.float64_t eval_gradient(int *ds, int *ci, np.float64_t *dp,
                                        np.float64_t *data, np.float64_t *grad)
@@ -182,16 +182,15 @@
                                   np.float64_t *rgba, np.float64_t *grad):
         cdef int i, fid, use
         cdef np.float64_t ta, tf, trgba[6], dot_prod
-        # This very quick pass doesn't hurt us too badly, and it helps for
-        # early-cutoff.  We check all the field tables, because we want to be
-        # able to attenuate even in the presence of no emissivity.
-        use = 0
-        for i in range(self.n_field_tables):
-            fid = self.field_tables[i].field_id
-            if (dvs[fid] >= self.field_tables[i].bounds[0]) and \
-               (dvs[fid] <= self.field_tables[i].bounds[1]):
-                use = 1
-                break
+        # NOTE: We now disable this.  I have left it to ease the process of
+        # potentially, one day, re-including it.
+        #use = 0
+        #for i in range(self.n_field_tables):
+        #    fid = self.field_tables[i].field_id
+        #    if (dvs[fid] >= self.field_tables[i].bounds[0]) and \
+        #       (dvs[fid] <= self.field_tables[i].bounds[1]):
+        #        use = 1
+        #        break
         for i in range(self.n_field_tables):
             self.istorage[i] = FIT_get_value(&self.field_tables[i], dvs)
         # We have to do this after the interpolation
@@ -227,12 +226,13 @@
     cdef int nv[2]
     cdef int vp_strides[3]
     cdef int im_strides[3]
+    cdef int vd_strides[3]
     cdef public object ax_vec, ay_vec
     cdef np.float64_t *x_vec, *y_vec
 
     def __cinit__(self, 
                   np.ndarray[np.float64_t, ndim=3] vp_pos,
-                  np.ndarray[np.float64_t, ndim=1] vp_dir,
+                  np.ndarray vp_dir,
                   np.ndarray[np.float64_t, ndim=1] center,
                   bounds,
                   np.ndarray[np.float64_t, ndim=3] image,
@@ -259,6 +259,11 @@
         for i in range(3):
             self.vp_strides[i] = vp_pos.strides[i] / 8
             self.im_strides[i] = image.strides[i] / 8
+        if vp_dir.ndim > 1:
+            for i in range(3):
+                self.vd_strides[i] = vp_dir.strides[i] / 8
+        else:
+            self.vd_strides[0] = self.vd_strides[1] = self.vd_strides[2] = -1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -339,19 +344,30 @@
         cdef int vi, vj, hit, i, ni, nj, nn
         cdef int iter[4]
         cdef np.float64_t v_pos[3], v_dir[3], rgba[6], extrema[4]
+        hit = 0
         self.calculate_extent(vp, extrema)
         vp.get_start_stop(extrema, iter)
         iter[0] = iclip(iter[0], 0, vp.nv[0])
         iter[1] = iclip(iter[1], 0, vp.nv[0])
         iter[2] = iclip(iter[2], 0, vp.nv[1])
         iter[3] = iclip(iter[3], 0, vp.nv[1])
-        hit = 0
-        for vi in range(iter[0], iter[1]):
-            for vj in range(iter[2], iter[3]):
-                vp.copy_into(vp.vp_pos, v_pos, vi, vj, 3, vp.vp_strides)
-                vp.copy_into(vp.image, rgba, vi, vj, 3, vp.im_strides)
-                self.integrate_ray(v_pos, vp.vp_dir, rgba, tf)
-                vp.copy_back(rgba, vp.image, vi, vj, 3, vp.im_strides)
+        if vp.vd_strides[0] == -1:
+            for vi in range(iter[0], iter[1]):
+                for vj in range(iter[2], iter[3]):
+                    vp.copy_into(vp.vp_pos, v_pos, vi, vj, 3, vp.vp_strides)
+                    vp.copy_into(vp.image, rgba, vi, vj, 3, vp.im_strides)
+                    self.integrate_ray(v_pos, vp.vp_dir, rgba, tf)
+                    vp.copy_back(rgba, vp.image, vi, vj, 3, vp.im_strides)
+        else:
+            # If we do not have an orthographic projection, we have to cast all
+            # our rays (until we can get an extrema calculation...)
+            for vi in range(vp.nv[0]):
+                for vj in range(vp.nv[1]):
+                    vp.copy_into(vp.vp_pos, v_pos, vi, vj, 3, vp.vp_strides)
+                    vp.copy_into(vp.image, rgba, vi, vj, 3, vp.im_strides)
+                    vp.copy_into(vp.vp_dir, v_dir, vi, vj, 3, vp.vd_strides)
+                    self.integrate_ray(v_pos, v_dir, rgba, tf)
+                    vp.copy_back(rgba, vp.image, vi, vj, 3, vp.im_strides)
         return hit
 
     @cython.boundscheck(False)
@@ -388,6 +404,7 @@
                                  TransferFunctionProxy tf):
         cdef int cur_ind[3], step[3], x, y, i, n, flat_ind, hit, direction
         cdef np.float64_t intersect_t = 1.0
+        cdef np.float64_t iv_dir[3]
         cdef np.float64_t intersect[3], tmax[3], tdelta[3]
         cdef np.float64_t enter_t, dist, alpha, dt, exit_t
         cdef np.float64_t tr, tl, temp_x, temp_y, dv
@@ -398,8 +415,8 @@
                 step[i] = 1
             x = (i+1) % 3
             y = (i+2) % 3
-            tl = (self.left_edge[i] - v_pos[i])/v_dir[i]
-            tr = (self.right_edge[i] - v_pos[i])/v_dir[i]
+            iv_dir[i] = 1.0/v_dir[i]
+            tl = (self.left_edge[i] - v_pos[i])*iv_dir[i]
             temp_x = (v_pos[x] + tl*v_dir[x])
             temp_y = (v_pos[y] + tl*v_dir[y])
             if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
@@ -407,6 +424,7 @@
                0.0 <= tl and tl < intersect_t:
                 direction = i
                 intersect_t = tl
+            tr = (self.right_edge[i] - v_pos[i])*iv_dir[i]
             temp_x = (v_pos[x] + tr*v_dir[x])
             temp_y = (v_pos[y] + tr*v_dir[y])
             if self.left_edge[x] <= temp_x and temp_x <= self.right_edge[x] and \
@@ -425,7 +443,7 @@
                                       step[i]*1e-8*self.dds[i] -
                                       self.left_edge[i])*self.idds[i])
             tmax[i] = (((cur_ind[i]+step[i])*self.dds[i])+
-                        self.left_edge[i]-v_pos[i])/v_dir[i]
+                        self.left_edge[i]-v_pos[i])*iv_dir[i]
             # This deals with the asymmetry in having our indices refer to the
             # left edge of a cell, but the right edge of the brick being one
             # extra zone out.
@@ -434,11 +452,11 @@
             if cur_ind[i] < 0 or cur_ind[i] >= self.dims[i]: return 0
             if step[i] > 0:
                 tmax[i] = (((cur_ind[i]+1)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])/v_dir[i]
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
             if step[i] < 0:
                 tmax[i] = (((cur_ind[i]+0)*self.dds[i])
-                            +self.left_edge[i]-v_pos[i])/v_dir[i]
-            tdelta[i] = (self.dds[i]/v_dir[i])
+                            +self.left_edge[i]-v_pos[i])*iv_dir[i]
+            tdelta[i] = (self.dds[i]*iv_dir[i])
             if tdelta[i] < 0: tdelta[i] *= -1
         # We have to jumpstart our calculation
         enter_t = intersect_t
@@ -498,6 +516,8 @@
         grad[0] = grad[1] = grad[2] = 0.0
         cdef int dti, i
         dt = (exit_t - enter_t) / tf.ns # 4 samples should be dt=0.25
+        cdef int offset = ci[0] * (self.dims[1] + 1) * (self.dims[2] + 1) \
+                        + ci[1] * (self.dims[2] + 1) + ci[2]
         for i in range(3):
             # temp is the left edge of the current cell
             temp = ci[i] * self.dds[i] + self.left_edge[i]
@@ -507,7 +527,7 @@
             ds[i] = v_dir[i] * self.idds[i] * dt
         for dti in range(tf.ns): 
             for i in range(self.n_fields):
-                self.dvs[i] = trilinear_interpolate(self.dims, ci, dp, self.data[i])
+                self.dvs[i] = offset_interpolate(self.dims, dp, self.data[i] + offset)
             #if (dv < tf.x_bounds[0]) or (dv > tf.x_bounds[1]):
             #    continue
             for i in range(3):
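
Two small things happen in the sampling path above: divisions by the ray
direction are replaced with multiplications by a precomputed reciprocal
(iv_dir), and a flat offset into the brick's vertex-centered data is computed
once per cell so offset_interpolate can be handed a pre-advanced pointer.  A
quick NumPy check of that stride arithmetic, with made-up brick dimensions:

    import numpy as np

    dims = np.array([8, 6, 4])                              # cells per axis in the brick
    verts = np.arange(np.prod(dims + 1), dtype='float64')   # flattened vertex-centered data
    ci = np.array([3, 2, 1])                                # current cell index

    offset = ci[0] * (dims[1] + 1) * (dims[2] + 1) \
           + ci[1] * (dims[2] + 1) + ci[2]
    # data[offset] is the value at the cell's lower corner vertex:
    assert verts[offset] == verts.reshape(dims + 1)[ci[0], ci[1], ci[2]]

    # And the reciprocal-direction substitution is just algebra:
    v_dir = np.array([0.3, -0.8, 0.5])
    iv_dir = 1.0 / v_dir
    left_edge, v_pos = np.zeros(3), np.array([0.1, 0.9, 0.4])
    assert np.allclose((left_edge - v_pos) * iv_dir, (left_edge - v_pos) / v_dir)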

Modified: trunk/yt/_amr_utils/png_writer.pyx
==============================================================================
--- trunk/yt/_amr_utils/png_writer.pyx	(original)
+++ trunk/yt/_amr_utils/png_writer.pyx	Thu Jun 24 08:38:42 2010
@@ -104,8 +104,8 @@
 
     # This is something of a translation of the matplotlib _png module
     cdef png_byte *pix_buffer = <png_byte *> buffer.data
-    cdef int width = buffer.shape[0]
-    cdef int height = buffer.shape[1]
+    cdef int width = buffer.shape[1]
+    cdef int height = buffer.shape[0]
 
     cdef FILE* fileobj = fopen(filename, "wb")
     cdef png_bytep *row_pointers
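
The png_writer change follows from NumPy's row-major layout: an image buffer is
indexed as (row, column, channel), so shape[0] is the number of rows (the image
height) and shape[1] the number of columns (the width).  A short sanity check
with a made-up 2:1 buffer:

    import numpy as np

    buf = np.zeros((100, 200, 4), dtype='uint8')   # 100 rows x 200 columns, RGBA
    height, width = buf.shape[0], buf.shape[1]
    print(width, height)                           # 200 100 -- what png_writer now uses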

Modified: trunk/yt/amr_utils.pyx
==============================================================================
--- trunk/yt/amr_utils.pyx	(original)
+++ trunk/yt/amr_utils.pyx	Thu Jun 24 08:38:42 2010
@@ -44,3 +44,4 @@
 include "_amr_utils/ContourFinding.pyx"
 include "_amr_utils/png_writer.pyx"
 include "_amr_utils/fortran_reader.pyx"
+include "_amr_utils/QuadTree.pyx"

Modified: trunk/yt/extensions/HierarchySubset.py
==============================================================================
--- trunk/yt/extensions/HierarchySubset.py	(original)
+++ trunk/yt/extensions/HierarchySubset.py	Thu Jun 24 08:38:42 2010
@@ -28,8 +28,6 @@
 from yt.lagos import AMRGridPatch, StaticOutput, AMRHierarchy
 import h5py, os.path
 
-import yt.commands as commands
-
 class DummyHierarchy(object):
     pass
 

Modified: trunk/yt/extensions/volume_rendering/TransferFunction.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/TransferFunction.py	(original)
+++ trunk/yt/extensions/volume_rendering/TransferFunction.py	Thu Jun 24 08:38:42 2010
@@ -30,6 +30,30 @@
 
 class TransferFunction(object):
     def __init__(self, x_bounds, nbins=256):
+        r"""A transfer function governs the transmission of emission and
+        absorption through a volume.
+
+        Transfer functions are defined by boundaries, bins, and the value that
+        governs transmission through that bin.  This is scaled between 0 and 1.
+        When integrating through a volume, the value through a given cell is
+        defined by the value calculated in the transfer function.
+
+        Parameters
+        ----------
+        x_bounds : tuple of floats
+            The min and max for the transfer function.  Values below or above
+            these values are discarded.
+        nbins : int
+            How many bins to calculate; in between, linear interpolation is
+            used, so low values are typically fine.
+
+        Notes
+        -----
+        Typically, raw transfer functions are not generated unless particular
+        and specific control over the integration is desired.  Usually either
+        color transfer functions, where the color values are calculated from
+        color tables, or multivariate transfer functions are used.
+        """
         self.pass_through = 0
         self.nbins = nbins
         self.x_bounds = x_bounds
@@ -37,10 +61,56 @@
         self.y = na.zeros(nbins, dtype='float64')
 
     def add_gaussian(self, location, width, height):
+        r"""Add a Gaussian distribution to the transfer function.
+
+        Typically, when rendering isocontours, a Gaussian distribution is the
+        easiest way to draw out features.  The spread provides a softness.
+        The values are calculated as :math:`f(x) = h \exp(-(x-x_0)^2 / w)`.
+
+        Parameters
+        ----------
+        location : float
+            The centroid of the Gaussian (:math:`x_0` in the above equation.)
+        width : float
+            The relative width (:math:`w` in the above equation.)
+        height : float
+            The peak height (:math:`h` in the above equation.)  Note that while
+            values greater than 1.0 will be accepted, the values of the transmission
+            function are clipped at 1.0.
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        """
         vals = height * na.exp(-(self.x - location)**2.0/width)
         self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
 
     def add_line(self, start, stop):
+        r"""Add a line between two points to the transmission function.
+
+        This will accept a starting point in (x,y) and an ending point in (x,y)
+        and set the values of the transmission function between those x-values
+        to be along the line connecting the y values.
+
+        Parameters
+        ----------
+        start : tuple of floats
+            (x0, y0), the starting point.  x0 is between the bounds of the
+            transfer function and y0 must be between 0.0 and 1.0.
+        stop : tuple of floats
+            (x1, y1), the ending point.  x1 is between the bounds of the
+            transfer function and y1 must be between 0.0 and 1.0.
+
+        Examples
+        --------
+        This will set the transfer function to be linear from 0.0 to 1.0,
+        across the bounds of the function.
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_line( (-10.0, 0.0), (-5.0, 1.0) )
+        """
         x0, y0 = start
         x1, y1 = stop
         slope = (y1-y0)/(x1-x0)
@@ -49,7 +119,36 @@
             slope * (self.x - x0) + y0
         self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
 
-    def add_step(self,start,stop,value):
+    def add_step(self, start, stop, value):
+        r"""Adds a step function to the transfer function.
+
+        This accepts a `start` and a `stop`, and then in between those points the
+        transfer function is set to the maximum of the transfer function and
+        the `value`.
+
+        Parameters
+        ----------
+        start : float
+            This is the beginning of the step function; must be within domain
+            of the transfer function.
+        stop : float
+            This is the ending of the step function; must be within domain
+            of the transfer function.
+        value : float
+            The value the transfer function will be set to between `start` and
+            `stop`.  Note that the transfer function will *actually* be set to
+            max(y, value) where y is the existing value of the transfer
+            function.
+
+        Examples
+        --------
+        Note that in this example, we have added a step function, but the
+        Gaussian that already exists will "win" where it exceeds 0.5.
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-7.0, 0.01, 1.0)
+        >>> tf.add_step(-8.0, -6.0, 0.5)
+        """
         vals = na.zeros(self.x.shape, 'float64')
         vals[(self.x >= start) & (self.x <= stop)] = value
         self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
@@ -74,6 +173,22 @@
         #self.y = na.clip(na.maximum(vals, self.y), 0.0, 1.0)
 
     def plot(self, filename):
+        r"""Save an image file of the transfer function.
+
+        This function loads up matplotlib, plots the transfer function, and saves it.
+
+        Parameters
+        ----------
+        filename : string
+            The file to save out the plot as.
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        >>> tf.plot("sample.png")
+        """
         import matplotlib;matplotlib.use("Agg");import pylab
         pylab.clf()
         pylab.plot(self.x, self.y, 'xk-')
@@ -83,6 +198,16 @@
 
 class MultiVariateTransferFunction(object):
     def __init__(self):
+        r"""This object constructs a set of field tables that allow for
+        multiple field variables to control the integration through a volume.
+
+        The integration through a volume typically only utilizes a single field
+        variable (for instance, Density) to set up and control the values
+        returned at the end of the integration.  For things like isocontours,
+        this is fine.  However, more complicated schemes are possible by using
+        this object.  For instance, density-weighted emission that produces
+        colors based on the temperature of the fluid.
+        """
         self.n_field_tables = 0
         self.tables = [] # Tables are interpolation tables
         self.field_ids = [0] * 6 # This correlates fields with tables
@@ -92,6 +217,50 @@
 
     def add_field_table(self, table, field_id, weight_field_id = -1,
                         weight_table_id = -1):
+        r"""This accepts a table describing integration.
+
+        A "field table" is a tabulated set of values that govern the
+        integration through a given field.  These are defined not only by the
+        transmission coefficient, interpolated from the table itself, but the
+        `field_id` that describes which of several fields the integration
+        coefficient is to be calculated from.
+
+        Parameters
+        ----------
+        table : `TransferFunction`
+            The integration table to be added to the set of tables used during
+            the integration.
+        field_id : int
+            Each volume has an associated set of fields.  This identifies which
+            of those fields will be used to calculate the integration
+            coefficient from this table.
+        weight_field_id : int, optional
+            If specified, the value of the field this identifies will be
+            multiplied against the integration coefficient.
+        weight_table_id : int, optional
+            If specified, the value from the *table* this identifies will be
+            multiplied against the integration coefficient.
+
+        Notes
+        -----
+        This can be rather complicated.  It's recommended that if you are
+        interested in manipulating this in detail that you examine the source
+        code, specifically the function FIT_get_value in
+        yt/_amr_utils/VolumeIntegrator.pyx.
+
+        Examples
+        --------
+        This example shows how to link a new transfer function against field 0.
+        Note that this by itself does not link a *channel* for integration
+        against a field.  This is because the weighting system does not mandate
+        that all tables contribute to a channel, only that they contribute a
+        value which may be used by other field tables.
+
+        >>> mv = MultiVariateTransferFunction()
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian( -7.0, 0.01, 1.0)
+        >>> mv.add_field_table(tf, 0)
+        """
         self.tables.append(table)
         self.field_ids[self.n_field_tables] = field_id
         self.weight_field_ids[self.n_field_tables] = weight_field_id
@@ -99,12 +268,57 @@
         self.n_field_tables += 1
 
     def link_channels(self, table_id, channels = 0):
+        r"""Link an image channel to a field table.
+
+        Once a field table has been added, it can be linked against a channel (any
+        one of the six -- red, green, blue, red absorption, green absorption, blue
+        absorption) and then the value calculated for that field table will be
+        added to the integration for that channel.  Not all tables must be linked
+        against channels.
+
+        Parameters
+        ----------
+        table_id : int
+            The 0-indexed table to link.
+        channels : int or list of ints
+            The channel or channels to link with this table's calculated value.
+
+
+        Examples
+        --------
+        This example shows how to link a new transfer function against field 0, and
+        then link that table against all three RGB channels.  Typically an
+        absorption (or 'alpha') channel is also linked.
+
+        >>> mv = MultiVariateTransferFunction()
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian( -7.0, 0.01, 1.0)
+        >>> mv.add_field_table(tf, 0)
+        >>> mv.link_channels(0, [0,1,2])
+        """
         channels = ensure_list(channels)
         for c in channels:
             self.field_table_ids[c] = table_id
 
 class ColorTransferFunction(MultiVariateTransferFunction):
     def __init__(self, x_bounds, nbins=256):
+        r"""A complete set of transfer functions for standard color-mapping.
+
+        This is the best and easiest way to set up volume rendering.  It
+        creates field tables for all three colors, their alphas, and has
+        support for sampling color maps and adding independent color values at
+        all locations.  It will correctly set up the
+        `MultiVariateTransferFunction`.
+
+        Parameters
+        ----------
+        x_bounds : tuple of floats
+            The min and max for the transfer function.  Values below or above
+            these values are discarded.
+        nbins : int
+            How many bins to calculate; in between, linear interpolation is
+            used, so low values are typically fine.
+        """
         MultiVariateTransferFunction.__init__(self)
         self.x_bounds = x_bounds
         self.nbins = nbins
@@ -125,14 +339,85 @@
         self.link_channels(4, [3,4,5])
 
     def add_gaussian(self, location, width, height):
+        r"""Add a Gaussian distribution to the transfer function.
+
+        Typically, when rendering isocontours, a Gaussian distribution is the
+        easiest way to draw out features.  The spread provides a softness.
+        The values are calculated as :math:`f(x) = h \exp(-(x-x_0)^2 / w)`.
+
+        Parameters
+        ----------
+        location : float
+            The centroid of the Gaussian (:math:`x_0` in the above equation.)
+        width : float
+            The relative width (:math:`w` in the above equation.)
+        height : list of 4 float
+            The peak height (:math:`h` in the above equation.)  Note that while
+            values greater than 1.0 will be accepted, the values of the transmission
+            function are clipped at 1.0.  This must be a list, and it is in the
+            order of (red, green, blue, alpha).
+
+        Examples
+        --------
+        This adds a red spike.
+
+        >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, [1.0, 0.0, 0.0, 1.0])
+        """
         for tf, v in zip(self.funcs, height):
             tf.add_gaussian(location, width, v)
 
-    def add_step(self, start, stop, height):
-        for tf, v in zip(self.funcs, height):
+    def add_step(self, start, stop, value):
+        r"""Adds a step function to the transfer function.
+
+        This accepts a `start` and a `stop`, and then in between those points the
+        transfer function is set to the maximum of the transfer function and
+        the `value`.
+
+        Parameters
+        ----------
+        start : float
+            This is the beginning of the step function; must be within domain
+            of the transfer function.
+        stop : float
+            This is the ending of the step function; must be within domain
+            of the transfer function.
+        value : list of 4 floats
+            The value the transfer function will be set to between `start` and
+            `stop`.  Note that the transfer function will *actually* be set to
+            max(y, value) where y is the existing value of the transfer
+            function.  This must be a list, and it is in the order of (red,
+            green, blue, alpha).
+
+
+        Examples
+        --------
+        This adds a step function that will produce a white value at > -6.0.
+
+        >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+        >>> tf.add_step(-6.0, -5.0, [1.0, 1.0, 1.0, 1.0])
+        """
+        for tf, v in zip(self.funcs, value):
             tf.add_step(start, stop, v)
 
     def plot(self, filename):
+        r"""Save an image file of the transfer function.
+
+        This function loads up matplotlib, plots all of the constituent
+        transfer functions, and saves the result.
+
+        Parameters
+        ----------
+        filename : string
+            The file to save out the plot as.
+
+        Examples
+        --------
+
+        >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+        >>> tf.add_layers(8)
+        >>> tf.plot("sample.png")
+        """
         from matplotlib import pyplot
         from matplotlib.ticker import FuncFormatter
         pyplot.clf()
@@ -160,6 +445,37 @@
         pyplot.savefig(filename)
 
     def sample_colormap(self, v, w, alpha=None, colormap="gist_stern"):
+        r"""Add a Gaussian based on an existing colormap.
+
+        Constructing pleasing Gaussians in a transfer function can pose some
+        challenges, so this function will add a single Gaussian whose colors
+        are taken from a colormap scaled between the bounds of the transfer
+        function.  As with `TransferFunction.add_gaussian`, the value is
+        calculated as :math:`f(x) = h \exp(-(x-x_0)^2 / w)` but with the height
+        for each color calculated from the colormap.
+
+        Parameters
+        ----------
+        v : float
+            The value at which the Gaussian is to be added.
+        w : float
+            The relative width (:math:`w` in the above equation.)
+        alpha : float, optional
+            The alpha value height for the Gaussian
+        colormap : string, optional
+            An acceptable colormap.  See either raven.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+
+        See Also
+        --------
+        ColorTransferFunction.add_layers : Many-at-a-time adder
+
+        Examples
+        --------
+
+        >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+        >>> tf.sample_colormap(-7.0, 0.01, colormap='algae')
+        """
         rel = (v - self.x_bounds[0])/(self.x_bounds[1] - self.x_bounds[0])
         cmap = get_cmap(colormap)
         r,g,b,a = cmap(rel)
@@ -170,6 +486,44 @@
 
     def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
                    colormap="gist_stern"):
+        r"""Add a set of Gaussians based on an existing colormap.
+
+        Constructing pleasing Gaussians in a transfer function can pose some
+        challenges, so this function will add several evenly-spaced Gaussians
+        whose colors are taken from a colormap scaled between the bounds of the
+        transfer function.   For each Gaussian to be added,
+        `ColorTransferFunction.sample_colormap` is called.
+
+        Parameters
+        ----------
+        N : int
+            How many Gaussians to add
+        w : float
+            The relative width of each Gaussian.  If not included, it is
+            calculated as 0.001 * (max_val - min_val) / N
+        mi : float, optional
+            If only a subset of the data range is to have the Gaussians added,
+            this is the minimum for that subset
+        ma : float, optional
+            If only a subset of the data range is to have the Gaussians added,
+            this is the maximum for that subset
+        alpha : list of floats, optional
+            The alpha value height for each Gaussian.  If not supplied, it is
+            calculated as the logspace between -2.0 and 0.0.
+        colormap : string, optional
+            An acceptable colormap.  See either raven.color_maps or
+            http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+
+        See Also
+        --------
+        ColorTransferFunction.sample_colormap : Single Gaussian adder
+
+        Examples
+        --------
+
+        >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+        >>> tf.add_layers(8)
+        """
         dist = (self.x_bounds[1] - self.x_bounds[0])
         if mi is None: mi = self.x_bounds[0] + dist/(10.0*N)
         if ma is None: ma = self.x_bounds[1] - dist/(10.0*N)
@@ -179,7 +533,27 @@
             self.sample_colormap(v, w, a, colormap=colormap)
 
 class ProjectionTransferFunction(MultiVariateTransferFunction):
-    def __init__(self, x_bounds = (-1e30, 1e30)):
+    def __init__(self, x_bounds = (-1e60, 1e60)):
+        r"""A transfer function that defines a simple projection.
+
+        To generate an interpolated, off-axis projection through a dataset,
+        this transfer function should be used.  It will create a very simple
+        table that merely sums along each ray.  Note that the end product will
+        need to be scaled by the total width through which the rays were cast,
+        a piece of information inaccessible to the transfer function.
+
+        Parameters
+        ----------
+            x_bounds : tuple of floats, optional
+            If any of your values lie outside this range, they will be
+            truncated.
+
+        Notes
+        -----
+        When you use this transfer function, you may need to explicitly disable
+        logging of fields.
+
+        """
         MultiVariateTransferFunction.__init__(self)
         self.x_bounds = x_bounds
         self.nbins = 2
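
As a worked example of the MultiVariateTransferFunction docstrings above, the
density-weighted, temperature-colored emission case might be wired up as
follows.  The field ordering (density as field 0, temperature as field 1) and
the bounds are assumptions of this sketch; they are set when the homogenized
volume is constructed, not by this commit:

    from yt.extensions.volume_rendering.TransferFunction import \
        TransferFunction, MultiVariateTransferFunction

    mv = MultiVariateTransferFunction()

    # Emission controlled by temperature (assumed field 1), weighted by the
    # local density value (assumed field 0).
    t_table = TransferFunction((4.0, 7.0))   # e.g. bounds in log10(T)
    t_table.add_gaussian(5.5, 0.02, 1.0)
    mv.add_field_table(t_table, field_id=1, weight_field_id=0)

    # Route that table's value into the red, green and blue emission channels.
    # A real setup would typically also link tables to the absorption channels.
    mv.link_channels(0, [0, 1, 2])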

Modified: trunk/yt/extensions/volume_rendering/__init__.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/__init__.py	(original)
+++ trunk/yt/extensions/volume_rendering/__init__.py	Thu Jun 24 08:38:42 2010
@@ -38,4 +38,4 @@
 from image_handling import export_rgba, import_rgba, \
                            plot_channel, plot_rgb
 from software_sampler import VolumeRendering
-from camera import Camera, StereoPairCamera
+from camera import Camera, PerspectiveCamera, StereoPairCamera

Modified: trunk/yt/extensions/volume_rendering/camera.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/camera.py	(original)
+++ trunk/yt/extensions/volume_rendering/camera.py	Thu Jun 24 08:38:42 2010
@@ -94,13 +94,13 @@
                          self.resolution[1])[None,:]
         inv_mat = self.inv_mat
         bc = self.back_center
-        vectors = na.zeros((self.resolution[0], self.resolution[1], 3),
+        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
                           dtype='float64', order='C')
-        vectors[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
-        vectors[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
-        vectors[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
+        positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
+        positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
+        positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
         bounds = (px.min(), px.max(), py.min(), py.max())
-        vector_plane = au.VectorPlane(vectors, self.box_vectors[2],
+        vector_plane = au.VectorPlane(positions, self.box_vectors[2],
                                       self.back_center, bounds, image,
                                       self.unit_vectors[0],
                                       self.unit_vectors[1])
@@ -134,6 +134,38 @@
             self.zoom(f)
             yield self.snapshot()
 
+
+class PerspectiveCamera(Camera):
+    def get_vector_plane(self, image):
+        # We should move away from pre-generation of vectors like this and into
+        # the usage of on-the-fly generation in the VolumeIntegrator module
+        # We might have a different width and back_center
+        px = na.linspace(-self.width[0]/2.0, self.width[0]/2.0,
+                         self.resolution[0])[:,None]
+        py = na.linspace(-self.width[1]/2.0, self.width[1]/2.0,
+                         self.resolution[1])[None,:]
+        inv_mat = self.inv_mat
+        bc = self.back_center
+        positions = na.zeros((self.resolution[0], self.resolution[1], 3),
+                          dtype='float64', order='C')
+        positions[:,:,0] = inv_mat[0,0]*px+inv_mat[0,1]*py+self.back_center[0]
+        positions[:,:,1] = inv_mat[1,0]*px+inv_mat[1,1]*py+self.back_center[1]
+        positions[:,:,2] = inv_mat[2,0]*px+inv_mat[2,1]*py+self.back_center[2]
+        bounds = (px.min(), px.max(), py.min(), py.max())
+        
+        # We are likely adding on an odd cutting condition here
+        vectors = self.front_center - positions
+        n = vectors * vectors
+        n = na.sum(n, axis=2)**0.5
+        vectors /= n[:,:,None]
+        print vectors.shape, vectors.dtype, vectors.flags
+
+        vector_plane = au.VectorPlane(positions, vectors,
+                                      self.back_center, bounds, image,
+                                      self.unit_vectors[0],
+                                      self.unit_vectors[1])
+        return vector_plane
+
 class StereoPairCamera(Camera):
     def __init__(self, original_camera, relative_separation = 0.005):
         self.original_camera = original_camera
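
For reference, the only geometric difference in PerspectiveCamera above is that
every pixel gets its own unit direction vector, pointing from its position on
the back plane toward the front center; the VectorPlane changes earlier in this
commit are what let those per-pixel directions reach the integrator.  A
standalone NumPy sketch of just the normalization step (the positions and
front_center here are placeholders):

    import numpy as na   # matching the `na` alias used throughout yt

    positions = na.random.random((4, 3, 3))      # stand-in 4x3-pixel back plane
    front_center = na.array([0.5, 0.5, 2.0])     # stand-in focal point

    vectors = front_center - positions           # point every pixel at the front center
    n = na.sum(vectors * vectors, axis=2) ** 0.5
    vectors /= n[:, :, None]                     # normalize to unit length

    print(na.allclose(na.sum(vectors ** 2, axis=2), 1.0))   # True: unit-length rays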


