[yt-svn] commit/yt: 3 new changesets

Bitbucket commits-noreply at bitbucket.org
Sat Feb 25 16:56:11 PST 2012


3 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/2c975a9fcdd5/
changeset:   2c975a9fcdd5
branch:      yt
user:        MatthewTurk
date:        2011-11-03 20:39:15
summary:     Speedups for cKDTree, including moving the entire preconnect stage into Cython
and making memory buffer usage more explicit.
affected #:  2 files
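
The main pattern in the diff below is twofold: per-particle neighbor lookups
go through the C-level __query method rather than the Python-level query()
wrapper, and per-call scratch arrays are carved out of the stack with alloca()
instead of being allocated as numpy temporaries. A minimal Cython sketch of
the buffer pattern (illustrative names, mirroring the alloca declaration in
the diff; not the committed code verbatim):

    cimport numpy as np

    cdef extern from "stdlib.h":
        # alloca returns stack memory that is reclaimed automatically when
        # the enclosing function returns; the pointer must never escape.
        void *alloca(int)

    def scratch_demo(int num_neighbors):
        # One stack buffer per call replaces np.empty(...) temporaries
        # that would otherwise be created inside the hot loop.
        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
                    sizeof(np.float64_t) * num_neighbors)
        cdef int j
        for j in range(num_neighbors):
            dist_temp[j] = 0.0
        return dist_temp[num_neighbors - 1]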

diff -r 280982fcf8ab02fcdc5163240084b0b269941837 -r 2c975a9fcdd5921f7fe1d24495191c10b7454bd5 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -613,66 +613,73 @@
         chain_map = defaultdict(set)
         for i in xrange(max(self.chainID)+1):
             chain_map[i].add(i)
-        if self.tree == 'F':
+        yt_counters("preconnect kd tree search.")
+        if self.tree == 'C':
+            nn = self.nMerge + 2
+            rv = self.kdtree.chainHOP_preconnect(
+                self.chainID, self.density, self.densest_in_chain,
+                self.is_inside, self.search_again,
+                self.peakthresh, self.saddlethresh, nn, self.nMerge,
+                self.chain_map)
+            self.search_again = rv.astype("bool")
+            yt_counters("preconnect kd tree search.")
+        elif self.tree == 'F':
             # Plus 2 because we're looking for that neighbor, but only keeping 
             # nMerge + 1 neighbor tags, skipping ourselves.
             fKD.dist = na.empty(self.nMerge+2, dtype='float64')
             fKD.tags = na.empty(self.nMerge+2, dtype='int64')
             # We can change this here to make the searches faster.
             fKD.nn = self.nMerge + 2
-        elif self.tree == 'C':
-            nn = self.nMerge + 2
-        yt_counters("preconnect kd tree search.")
-        for i in xrange(self.size):
-            # Don't consider this particle if it's not part of a chain.
-            if self.chainID[i] < 0: continue
-            chainID_i = self.chainID[i]
-            # If this particle is in the padding, don't make a connection.
-            if not self.is_inside[i]: continue
-            # Find this particle's chain max_dens.
-            part_max_dens = self.densest_in_chain[chainID_i]
-            # We're only connecting >= peakthresh chains now.
-            if part_max_dens < self.peakthresh: continue
-            # Loop over nMerge closest nearest neighbors.
-            if self.tree == 'F':
-                fKD.qv = fKD.pos[:, i]
-                find_nn_nearest_neighbors()
-                NNtags = fKD.tags[:] - 1
-            elif self.tree == 'C':
-                qv = self.pos[i, :]
-                NNtags = self.kdtree.query(qv, nn)[1]
-            same_count = 0
-            for j in xrange(int(self.nMerge+1)):
-                thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
-                thisNN_chainID = self.chainID[thisNN]
-                # If our neighbor is in the same chain, move on.
-                # Move on if these chains are already connected:
-                if chainID_i == thisNN_chainID or \
-                        thisNN_chainID in chain_map[chainID_i]:
-                    same_count += 1
-                    continue
-                # Everything immediately below is for
-                # neighboring particles with a chainID. 
-                if thisNN_chainID >= 0:
-                    # Find thisNN's chain's max_dens.
-                    thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
-                    # We're only linking peakthresh chains
-                    if thisNN_max_dens < self.peakthresh: continue
-                    # Calculate the two groups boundary density.
-                    boundary_density = (self.density[thisNN] + self.density[i]) / 2.
-                    # Don't connect if the boundary is too low.
-                    if boundary_density < self.saddlethresh: continue
-                    # Mark these chains as related.
-                    chain_map[thisNN_chainID].add(chainID_i)
-                    chain_map[chainID_i].add(thisNN_chainID)
-            if same_count == self.nMerge + 1:
-                # All our neighbors are in the same chain already, so 
-                # we don't need to search again.
-                self.search_again[i] = False
-        try:
-            del NNtags
-        except UnboundLocalError:
-            pass
+            for i in xrange(self.size):
+                # Don't consider this particle if it's not part of a chain.
+                if self.chainID[i] < 0: continue
+                chainID_i = self.chainID[i]
+                # If this particle is in the padding, don't make a connection.
+                if not self.is_inside[i]: continue
+                # Find this particle's chain max_dens.
+                part_max_dens = self.densest_in_chain[chainID_i]
+                # We're only connecting >= peakthresh chains now.
+                if part_max_dens < self.peakthresh: continue
+                # Loop over nMerge closest nearest neighbors.
+                if self.tree == 'F':
+                    fKD.qv = fKD.pos[:, i]
+                    find_nn_nearest_neighbors()
+                    NNtags = fKD.tags[:] - 1
+                elif self.tree == 'C':
+                    qv = self.pos[i, :]
+                    NNtags = self.kdtree.query(qv, nn)[1]
+                same_count = 0
+                for j in xrange(int(self.nMerge+1)):
+                    thisNN = NNtags[j+1] # Don't consider ourselves at NNtags[0]
+                    thisNN_chainID = self.chainID[thisNN]
+                    # If our neighbor is in the same chain, move on.
+                    # Move on if these chains are already connected:
+                    if chainID_i == thisNN_chainID or \
+                            thisNN_chainID in chain_map[chainID_i]:
+                        same_count += 1
+                        continue
+                    # Everything immediately below is for
+                    # neighboring particles with a chainID. 
+                    if thisNN_chainID >= 0:
+                        # Find thisNN's chain's max_dens.
+                        thisNN_max_dens = self.densest_in_chain[thisNN_chainID]
+                        # We're only linking peakthresh chains
+                        if thisNN_max_dens < self.peakthresh: continue
+                        # Calculate the two groups boundary density.
+                        boundary_density = (self.density[thisNN] + self.density[i]) / 2.
+                        # Don't connect if the boundary is too low.
+                        if boundary_density < self.saddlethresh: continue
+                        # Mark these chains as related.
+                        chain_map[thisNN_chainID].add(chainID_i)
+                        chain_map[chainID_i].add(thisNN_chainID)
+                if same_count == self.nMerge + 1:
+                    # All our neighbors are in the same chain already, so 
+                    # we don't need to search again.
+                    self.search_again[i] = False
+            try:
+                del NNtags
+            except UnboundLocalError:
+                pass
         yt_counters("preconnect kd tree search.")
         # Recursively jump links until we get to a chain whose densest
         # link is to itself. At that point we've found the densest chain
@@ -680,7 +687,7 @@
         yt_counters("preconnect pregrouping.")
         final_chain_map = na.empty(max(self.chainID)+1, dtype='int64')
         removed = 0
-        for i in xrange(max(self.chainID)+1):
+        for i in xrange(self.chainID.max()+1):
             j = chain_count - i - 1
             densest_link = self._recurse_preconnected_links(chain_map, j)
             final_chain_map[j] = densest_link


diff -r 280982fcf8ab02fcdc5163240084b0b269941837 -r 2c975a9fcdd5921f7fe1d24495191c10b7454bd5 yt/utilities/spatial/ckdtree.pyx
--- a/yt/utilities/spatial/ckdtree.pyx
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -7,18 +7,21 @@
 
 import kdtree
 
-cdef double infinity = np.inf
+cdef extern from "stdlib.h":
+    # NOTE that size_t might not be int
+    void *alloca(int)
+
+cdef np.float64_t infinity = np.inf
 
 __all__ = ['cKDTree']
 
-
 # priority queue
 cdef union heapcontents:
     int intdata
     char* ptrdata
 
 cdef struct heapitem:
-    double priority
+    np.float64_t priority
     heapcontents contents
 
 cdef struct heap:
@@ -97,23 +100,23 @@
 
 
 # utility functions
-cdef inline double dmax(double x, double y):
+cdef inline np.float64_t dmax(np.float64_t x, np.float64_t y):
     if x>y:
         return x
     else:
         return y
-cdef inline double dabs(double x):
+cdef inline np.float64_t dabs(np.float64_t x):
     if x>0:
         return x
     else:
         return -x
-cdef inline double dmin(double x, double y):
+cdef inline np.float64_t dmin(np.float64_t x, np.float64_t y):
     if x<y:
         return x
     else:
         return y
-cdef inline double _distance_p(double*x,double*y,double p,int k,double upperbound,
-    double*period):
+cdef inline np.float64_t _distance_p(np.float64_t*x,np.float64_t*y,np.float64_t p,int k,np.float64_t upperbound,
+    np.float64_t*period):
     """Compute the distance between x and y
 
     Computes the Minkowski p-distance to the power p between two points.
@@ -123,7 +126,7 @@
     Periodicity added by S. Skory.
     """
     cdef int i
-    cdef double r, m
+    cdef np.float64_t r, m
     r = 0
     if p==infinity:
         for i in range(k):
@@ -151,9 +154,9 @@
 cdef struct innernode:
     int split_dim
     int n_points
-    double split
-    double* maxes
-    double* mins
+    np.float64_t split
+    np.float64_t* maxes
+    np.float64_t* mins
     innernode* less
     innernode* greater
 cdef struct leafnode:
@@ -161,14 +164,14 @@
     int n_points
     int start_idx
     int end_idx
-    double* maxes
-    double* mins
+    np.float64_t* maxes
+    np.float64_t* mins
 
 # this is the standard trick for variable-size arrays:
-# malloc sizeof(nodeinfo)+self.m*sizeof(double) bytes.
+# malloc sizeof(nodeinfo)+self.m*sizeof(np.float64_t) bytes.
 cdef struct nodeinfo:
     innernode* node
-    double side_distances[0]
+    np.float64_t side_distances[0]
 
 cdef class cKDTree:
     """kd-tree for quick nearest-neighbor lookup
@@ -201,7 +204,7 @@
     data : array-like, shape (n,m)
         The n data points of dimension m to be indexed. This array is 
         not copied unless this is necessary to produce a contiguous 
-        array of doubles, and so modifying this data will result in 
+        array of float64s, and so modifying this data will result in 
         bogus results.
     leafsize : positive integer
         The number of points at which the algorithm switches over to
@@ -211,21 +214,21 @@
 
     cdef innernode* tree 
     cdef readonly object data
-    cdef double* raw_data
+    cdef np.float64_t* raw_data
     cdef readonly int n, m
     cdef readonly int leafsize
     cdef readonly object maxes
-    cdef double* raw_maxes
+    cdef np.float64_t* raw_maxes
     cdef readonly object mins
-    cdef double* raw_mins
+    cdef np.float64_t* raw_mins
     cdef object indices
     cdef np.int64_t* raw_indices
     def __init__(cKDTree self, data, int leafsize=10):
-        cdef np.ndarray[double, ndim=2] inner_data
-        cdef np.ndarray[double, ndim=1] inner_maxes
-        cdef np.ndarray[double, ndim=1] inner_mins
+        cdef np.ndarray[np.float64_t, ndim=2] inner_data
+        cdef np.ndarray[np.float64_t, ndim=1] inner_maxes
+        cdef np.ndarray[np.float64_t, ndim=1] inner_mins
         cdef np.ndarray[np.int64_t, ndim=1] inner_indices
-        self.data = np.ascontiguousarray(data,dtype=np.double)
+        self.data = np.ascontiguousarray(data,dtype="float64")
         self.n, self.m = np.shape(self.data)
         self.leafsize = leafsize
         if self.leafsize<1:
@@ -235,27 +238,27 @@
         self.indices = np.ascontiguousarray(np.arange(self.n,dtype=np.int64))
 
         inner_data = self.data
-        self.raw_data = <double*>inner_data.data
+        self.raw_data = <np.float64_t*>inner_data.data
         inner_maxes = self.maxes
-        self.raw_maxes = <double*>inner_maxes.data
+        self.raw_maxes = <np.float64_t*>inner_maxes.data
         inner_mins = self.mins
-        self.raw_mins = <double*>inner_mins.data
+        self.raw_mins = <np.float64_t*>inner_mins.data
         inner_indices = self.indices
         self.raw_indices = <np.int64_t*>inner_indices.data
 
         self.tree = self.__build(0, self.n, self.raw_maxes, self.raw_mins)
 
-    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, double* maxes, double* mins):
+    cdef innernode* __build(cKDTree self, int start_idx, int end_idx, np.float64_t* maxes, np.float64_t* mins):
         cdef leafnode* n
         cdef innernode* ni
         cdef int i, j, t, p, q, d
-        cdef double size, split, minval, maxval
-        cdef double*mids
+        cdef np.float64_t size, split, minval, maxval
+        cdef np.float64_t*mids
         if end_idx-start_idx<=self.leafsize:
             n = <leafnode*>stdlib.malloc(sizeof(leafnode))
             # Skory
-            n.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            n.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            n.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            n.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 n.maxes[i] = maxes[i]
                 n.mins[i] = mins[i]
@@ -327,7 +330,7 @@
             # construct new node representation
             ni = <innernode*>stdlib.malloc(sizeof(innernode))
 
-            mids = <double*>stdlib.malloc(sizeof(double)*self.m)
+            mids = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 mids[i] = maxes[i]
             mids[d] = split
@@ -343,8 +346,8 @@
             ni.split_dim = d
             ni.split = split
             # Skory
-            ni.maxes = <double*>stdlib.malloc(sizeof(double)*self.m)
-            ni.mins = <double*>stdlib.malloc(sizeof(double)*self.m)
+            ni.maxes = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
+            ni.mins = <np.float64_t*>stdlib.malloc(sizeof(np.float64_t)*self.m)
             for i in range(self.m):
                 ni.maxes[i] = maxes[i]
                 ni.mins[i] = mins[i]
@@ -366,32 +369,32 @@
         self.__free_tree(self.tree)
 
     cdef void __query(cKDTree self, 
-            double*result_distances, 
-            long*result_indices, 
-            double*x, 
+            np.float64_t*result_distances, 
+            np.int64_t*result_indices, 
+            np.float64_t*x, 
             int k, 
-            double eps, 
-            double p, 
-            double distance_upper_bound,
-            double*period):
+            np.float64_t eps, 
+            np.float64_t p, 
+            np.float64_t distance_upper_bound,
+            np.float64_t*period):
         cdef heap q
         cdef heap neighbors
 
         cdef int i, j
-        cdef double t
+        cdef np.float64_t t
         cdef nodeinfo* inf
         cdef nodeinfo* inf2
-        cdef double d
-        cdef double m_left, m_right, m
-        cdef double epsfac
-        cdef double min_distance
-        cdef double far_min_distance
+        cdef np.float64_t d
+        cdef np.float64_t m_left, m_right, m
+        cdef np.float64_t epsfac
+        cdef np.float64_t min_distance
+        cdef np.float64_t far_min_distance
         cdef heapitem it, it2, neighbor
         cdef leafnode* node
         cdef innernode* inode
         cdef innernode* near
         cdef innernode* far
-        cdef double* side_distances
+        cdef np.float64_t* side_distances
 
         # priority queue for chasing nodes
         # entries are:
@@ -406,7 +409,7 @@
         heapcreate(&neighbors,k)
 
         # set up first nodeinfo
-        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+        inf = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
         inf.node = self.tree
         for i in range(self.m):
             inf.side_distances[i] = 0
@@ -500,7 +503,7 @@
                 # far child is further by an amount depending only
                 # on the split value; compute its distance and side_distances
                 # and push it on the queue if it's near enough
-                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(double)) 
+                inf2 = <nodeinfo*>stdlib.malloc(sizeof(nodeinfo)+self.m*sizeof(np.float64_t)) 
                 it2.contents.ptrdata = <char*> inf2
                 inf2.node = far
 
@@ -552,8 +555,8 @@
         heapdestroy(&q)
         heapdestroy(&neighbors)
 
-    def query(cKDTree self, object x, int k=1, double eps=0, double p=2, 
-            double distance_upper_bound=infinity, object period=None):
+    def query(cKDTree self, object x, int k=1, np.float64_t eps=0, np.float64_t p=2, 
+            np.float64_t distance_upper_bound=infinity, object period=None):
         """query(self, x, k=1, eps=0, p=2, distance_upper_bound=np.inf,
            period=None)
         
@@ -592,16 +595,16 @@
             Missing neighbors are indicated with self.n.
 
         """
-        cdef np.ndarray[long, ndim=2] ii
-        cdef np.ndarray[double, ndim=2] dd
-        cdef np.ndarray[double, ndim=2] xx
-        cdef np.ndarray[double, ndim=1] cperiod
+        cdef np.ndarray[np.int64_t, ndim=2] ii
+        cdef np.ndarray[np.float64_t, ndim=2] dd
+        cdef np.ndarray[np.float64_t, ndim=2] xx
+        cdef np.ndarray[np.float64_t, ndim=1] cperiod
         cdef int c
-        x = np.asarray(x).astype(np.double)
+        x = np.asarray(x).astype("float64")
         if period is None:
             period = np.array([np.inf]*self.m)
         else:
-            period = np.asarray(period).astype(np.double)
+            period = np.asarray(period).astype("float64")
         cperiod = np.ascontiguousarray(period)
         if np.shape(x)[-1] != self.m:
             raise ValueError("x must consist of vectors of length %d but has shape %s" % (self.m, np.shape(x)))
@@ -616,20 +619,20 @@
         n = np.prod(retshape)
         xx = np.reshape(x,(n,self.m))
         xx = np.ascontiguousarray(xx)
-        dd = np.empty((n,k),dtype=np.double)
+        dd = np.empty((n,k),dtype="float64")
         dd.fill(infinity)
-        ii = np.empty((n,k),dtype=np.long)
+        ii = np.empty((n,k),dtype="int64")
         ii.fill(self.n)
         for c in range(n):
             self.__query(
-                    (<double*>dd.data)+c*k,
-                    (<long*>ii.data)+c*k,
-                    (<double*>xx.data)+c*self.m, 
+                    (<np.float64_t*>dd.data)+c*k,
+                    (<np.int64_t*>ii.data)+c*k,
+                    (<np.float64_t*>xx.data)+c*self.m, 
                     k, 
                     eps,
                     p, 
                     distance_upper_bound,
-                    <double*>cperiod.data)
+                    <np.float64_t*>cperiod.data)
         if single:
             if k==1:
                 return dd[0,0], ii[0,0]
@@ -641,7 +644,10 @@
             else:
                 return np.reshape(dd,retshape+(k,)), np.reshape(ii,retshape+(k,))
 
-    def chainHOP_get_dens(cKDTree self, object mass, int num_neighbors=65, \
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def chainHOP_get_dens(cKDTree self, object omass, int num_neighbors=65, \
             int nMerge=6):
         """ query the tree for the nearest neighbors, to get the density
             of particles for chainHOP.
@@ -669,38 +675,46 @@
         
         """
         
-        # We're no longer returning all the tags in this step.
+        # We're no longer returning all the tags in this step.
         # We do it chunked, in find_chunk_nearest_neighbors.
-        #cdef np.ndarray[long, ndim=2] tags
-        cdef np.ndarray[double, ndim=1] dens
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
+        #cdef np.ndarray[np.int64_t, ndim=2] tags
+        cdef np.ndarray[np.float64_t, ndim=1] dens
         cdef int i, pj, j
-        cdef double ih2, fNorm, r2, rs
+        cdef np.float64_t ih2, fNorm, r2, rs
         
-        #tags = np.empty((self.n, nMerge), dtype=np.long)
-        dens = np.empty(self.n, dtype=np.double)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        # Need to start out with zeros before we start adding to it.
-        dens.fill(0.0)
+        #tags = np.empty((self.n, nMerge), dtype="int64")
+        dens = np.zeros(self.n, dtype="float64")
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
 
-        mass = np.array(mass).astype(np.double)
-        mass = np.ascontiguousarray(mass)
+        cdef np.ndarray[np.float64_t, ndim=1] mass = np.array(omass).astype("float64")
+        cdef np.float64_t lpi = np.pi
         
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(self.n):
-            query = self.data[i]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
+            for j in range(self.m):
+                query[j] = local_data[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
             
             #calculate the density for this particle
-            ih2 = 4.0/np.max(dist_temp)
-            fNorm = 0.5*np.sqrt(ih2)*ih2/np.pi
+            ih2 = -1
+            for j in range(num_neighbors):
+                ih2 = dmax(ih2, dist_temp[j])
+            ih2 = 4.0/ih2
+            fNorm = 0.5*(ih2**1.5)/lpi
             for j in range(num_neighbors):
                 pj = tags_temp[j]
                 r2 = dist_temp[j] * ih2
-                rs = 2.0 - np.sqrt(r2)
+                rs = 2.0 - (r2**0.5)
                 if (r2 < 1.0):
                     rs = (1.0 - 0.75*rs*r2)
                 else:
@@ -715,6 +729,8 @@
         #return (dens, tags)
         return dens
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def find_chunk_nearest_neighbors(cKDTree self, int start, int finish, \
         int num_neighbors=65):
         """ query the tree in chunks, between start and finish, recording the
@@ -738,21 +754,99 @@
         
         """
         
-        cdef np.ndarray[long, ndim=2] chunk_tags
-        cdef np.ndarray[double, ndim=1] query
-        cdef np.ndarray[long, ndim=1] tags_temp
-        cdef np.ndarray[double, ndim=1] dist_temp
-        cdef int i
+        cdef np.ndarray[np.int64_t, ndim=2] chunk_tags
+        cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
+        cdef int i, j
         
-        chunk_tags = np.empty((finish-start, num_neighbors), dtype=np.long)
-        query = np.empty(self.m, dtype=np.double)
-        tags_temp = np.empty(num_neighbors, dtype=np.long)
-        dist_temp = np.empty(num_neighbors, dtype=np.double)
-        
+        chunk_tags = np.empty((finish-start, num_neighbors), dtype="int64")
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * num_neighbors)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * num_neighbors)
+        cdef np.float64_t period[3]
+        for i in range(3): period[i] = 1.0
+
         for i in range(finish-start):
-            query = self.data[i+start]
-            (dist_temp, tags_temp) = self.query(query, k=num_neighbors, period=[1.]*3)
-            chunk_tags[i,:] = tags_temp[:]
+            for j in range(self.m):
+                query[j] = local_data[i+start,j]
+            self.__query(dist_temp, tags_temp,
+                         query, num_neighbors, 0.0, 
+                         2, infinity, period)
+            for j in range(num_neighbors):
+                chunk_tags[i,j] = tags_temp[j]
         
         return chunk_tags
 
+    def chainHOP_preconnect(self, np.ndarray[np.int64_t, ndim=1] chainID,
+                                  np.ndarray[np.float64_t, ndim=1] density,
+                                  np.ndarray[np.float64_t, ndim=1] densest_in_chain,
+                                  np.ndarray bis_inside,
+                                  np.ndarray bsearch_again,
+                                  np.float64_t peakthresh,
+                                  np.float64_t saddlethresh,
+                                  int nn, int nMerge,
+                                  object chain_map):
+        cdef np.ndarray[np.int32_t, ndim=1] is_inside
+        cdef np.ndarray[np.int32_t, ndim=1] search_again
+        cdef np.ndarray[np.float64_t, ndim=2] pos 
+        cdef np.int64_t thisNN, thisNN_chainID, same_count
+        cdef np.float64_t *query = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * self.m)
+        cdef np.float64_t *dist_temp = <np.float64_t *> alloca(
+                    sizeof(np.float64_t) * nn)
+        cdef np.int64_t *tags_temp = <np.int64_t *> alloca(
+                    sizeof(np.int64_t) * nn)
+        cdef np.float64_t period[3], thisNN_max_dens, boundary_density
+        cdef int i, j, npart, chainID_i
+        cdef np.float64_t part_max_dens
+        is_inside = bis_inside.astype("int32")
+        search_again = bsearch_again.astype("int32")
+        pos = self.data
+        npart = pos.shape[0]
+        for i in range(3): period[i] = 1.0
+        for i in xrange(npart):
+            # Don't consider this particle if it's not part of a chain.
+            if chainID[i] < 0: continue
+            chainID_i = chainID[i]
+            # If this particle is in the padding, don't make a connection.
+            if not is_inside[i]: continue
+            # Find this particle's chain max_dens.
+            part_max_dens = densest_in_chain[chainID_i]
+            # We're only connecting >= peakthresh chains now.
+            if part_max_dens < peakthresh: continue
+            # Loop over nMerge closest nearest neighbors.
+            for j in range(self.m):
+                query[j] = pos[i,j]
+            self.__query(dist_temp, tags_temp,
+                         query, nn, 0.0, 
+                         2, infinity, period)
+            same_count = 0
+            for j in xrange(int(nMerge+1)):
+                thisNN = tags_temp[j+1] # Don't consider ourselves at tags_temp[0]
+                thisNN_chainID = chainID[thisNN]
+                # If our neighbor is in the same chain, move on.
+                # Move on if these chains are already connected:
+                if chainID_i == thisNN_chainID or \
+                        thisNN_chainID in chain_map[chainID_i]:
+                    same_count += 1
+                    continue
+                # Everything immediately below is for
+                # neighboring particles with a chainID. 
+                if thisNN_chainID >= 0:
+                    # Find thisNN's chain's max_dens.
+                    thisNN_max_dens = densest_in_chain[thisNN_chainID]
+                    # We're only linking peakthresh chains
+                    if thisNN_max_dens < peakthresh: continue
+                    # Calculate the two groups boundary density.
+                    boundary_density = (density[thisNN] + density[i]) / 2.
+                    # Don't connect if the boundary is too low.
+                    if boundary_density < saddlethresh: continue
+                    # Mark these chains as related.
+                    chain_map[thisNN_chainID].add(chainID_i)
+                    chain_map[chainID_i].add(thisNN_chainID)
+            if same_count == nMerge + 1:
+                # All our neighbors are in the same chain already, so 
+                # we don't need to search again.
+                search_again[i] = 0
+        return search_again



https://bitbucket.org/yt_analysis/yt/changeset/44fbf08affb7/
changeset:   44fbf08affb7
branch:      yt
user:        MatthewTurk
date:        2011-11-03 23:05:09
summary:     Fixed a few bugs, decided to make kD-tree actually only 3D.
affected #:  2 files
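
In the ckdtree.pyx portion of this changeset, __query is specialized to the
one configuration the halo finder actually uses (p == 2, eps == 0, no finite
distance upper bound, periodic coordinates): those values are asserted, and
the brute-force leaf loop inlines the squared minimum-image distance rather
than calling _distance_p. A pure-Python sketch of what that inlined loop
computes (hypothetical function name):

    def periodic_sq_distance(x, y, period):
        """Squared minimum-image distance in a periodic box."""
        d = 0.0
        for xi, yi, pi in zip(x, y, period):
            m = min(abs(xi - yi), pi - abs(xi - yi))  # nearest image
            d += m * m
        return d

    # In a unit box, points at 0.05 and 0.95 are only 0.1 apart.
    assert abs(periodic_sq_distance([0.05], [0.95], [1.0]) - 0.01) < 1e-12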

diff -r 2c975a9fcdd5921f7fe1d24495191c10b7454bd5 -r 44fbf08affb7a88018e17120eebf362e0391dae4 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -388,7 +388,7 @@
             self.pos[self.psize:, 2] = self.zpos_pad
             del self.xpos_pad, self.ypos_pad, self.zpos_pad
             gc.collect()
-            self.kdtree = cKDTree(self.pos, leafsize = 32)
+            self.kdtree = cKDTree(self.pos, leafsize = 64)
         self.__max_memory()
         yt_counters("init kd tree")
 
@@ -620,7 +620,7 @@
                 self.chainID, self.density, self.densest_in_chain,
                 self.is_inside, self.search_again,
                 self.peakthresh, self.saddlethresh, nn, self.nMerge,
-                self.chain_map)
+                chain_map)
             self.search_again = rv.astype("bool")
             yt_counters("preconnect kd tree search.")
         elif self.tree == 'F':


diff -r 2c975a9fcdd5921f7fe1d24495191c10b7454bd5 -r 44fbf08affb7a88018e17120eebf362e0391dae4 yt/utilities/spatial/ckdtree.pyx
--- a/yt/utilities/spatial/ckdtree.pyx
+++ b/yt/utilities/spatial/ckdtree.pyx
@@ -2,7 +2,7 @@
 # Released under the scipy license
 import numpy as np
 cimport numpy as np
-cimport stdlib
+cimport libc.stdlib as stdlib
 cimport cython
 
 import kdtree
@@ -140,6 +140,12 @@
             r += m
             if r>upperbound:
                 return r
+    elif p==2:
+        for i in range(k):
+            m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
+            r += m*m
+            if r>upperbound:
+                return r
     else:
         for i in range(k):
             m = dmin(dabs(x[i] - y[i]), period[i] - dabs(x[i] - y[i]))
@@ -377,14 +383,17 @@
             np.float64_t p, 
             np.float64_t distance_upper_bound,
             np.float64_t*period):
+        assert(p == 2)
+        assert(eps == 0.0)
+        assert(distance_upper_bound == infinity)
         cdef heap q
         cdef heap neighbors
 
-        cdef int i, j
-        cdef np.float64_t t
+        cdef int i, j, i2, j2
+        cdef np.float64_t t, y
         cdef nodeinfo* inf
         cdef nodeinfo* inf2
-        cdef np.float64_t d
+        cdef np.float64_t d, di
         cdef np.float64_t m_left, m_right, m
         cdef np.float64_t epsfac
         cdef np.float64_t min_distance
@@ -420,28 +429,15 @@
                 t = self.raw_mins[i]-x[i]
                 if t>inf.side_distances[i]:
                     inf.side_distances[i] = t
-            if p!=1 and p!=infinity:
-                inf.side_distances[i]=inf.side_distances[i]**p
+            inf.side_distances[i]=inf.side_distances[i]*inf.side_distances[i]
 
         # compute first distance
         min_distance = 0.
         for i in range(self.m):
-            if p==infinity:
-                min_distance = dmax(min_distance,inf.side_distances[i])
-            else:
-                min_distance += inf.side_distances[i]
+            min_distance += inf.side_distances[i]
 
         # fiddle approximation factor
-        if eps==0:
-            epsfac=1
-        elif p==infinity:
-            epsfac = 1/(1+eps)
-        else:
-            epsfac = 1/(1+eps)**p
-
-        # internally we represent all distances as distance**p
-        if p!=infinity and distance_upper_bound!=infinity:
-            distance_upper_bound = distance_upper_bound**p
+        epsfac=1
 
         while True:
             if inf.node.split_dim==-1:
@@ -449,10 +445,11 @@
 
                 # brute-force
                 for i in range(node.start_idx,node.end_idx):
-                    d = _distance_p(
-                            self.raw_data+self.raw_indices[i]*self.m,
-                            x,p,self.m,distance_upper_bound,period)
-                        
+                    d = 0.0
+                    for i2 in range(self.m):
+                        y = self.raw_data[self.raw_indices[i]*self.m + i2]
+                        di = dmin(dabs(x[i2] - y), period[i2] - dabs(x[i2] - y))
+                        d += di*di
                     if d<distance_upper_bound:
                         # replace furthest neighbor
                         if neighbors.n==k:
@@ -520,17 +517,9 @@
 
                 # one side distance changes
                 # we can adjust the minimum distance without recomputing
-                if p == infinity:
-                    # we never use side_distances in the l_infinity case
-                    # inf2.side_distances[inode.split_dim] = dabs(inode.split-x[inode.split_dim])
-                    far_min_distance = dmax(min_distance, m)
-                elif p == 1:
-                    inf2.side_distances[inode.split_dim] = m
-                    far_min_distance = dmax(min_distance, m)
-                else:
-                    inf2.side_distances[inode.split_dim] = m**p
-                    #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
-                    far_min_distance = m**p
+                inf2.side_distances[inode.split_dim] = m*m
+                #far_min_distance = min_distance - inf.side_distances[inode.split_dim] + inf2.side_distances[inode.split_dim]
+                far_min_distance = m*m
 
                 it2.priority = far_min_distance
 
@@ -547,10 +536,7 @@
         for i in range(neighbors.n-1,-1,-1):
             neighbor = heappop(&neighbors) # FIXME: neighbors may be realloced
             result_indices[i] = neighbor.contents.intdata
-            if p==1 or p==infinity:
-                result_distances[i] = -neighbor.priority
-            else:
-                result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
+            result_distances[i] = (-neighbor.priority) #**(1./p) S. Skory
 
         heapdestroy(&q)
         heapdestroy(&neighbors)
@@ -687,7 +673,7 @@
         cdef np.ndarray[np.float64_t, ndim=2] local_data = self.data
 
         cdef np.ndarray[np.float64_t, ndim=1] mass = np.array(omass).astype("float64")
-        cdef np.float64_t lpi = np.pi
+        cdef np.float64_t ipi = 1.0/np.pi
         
         cdef np.float64_t *query = <np.float64_t *> alloca(
                     sizeof(np.float64_t) * self.m)
@@ -710,7 +696,7 @@
             for j in range(num_neighbors):
                 ih2 = dmax(ih2, dist_temp[j])
             ih2 = 4.0/ih2
-            fNorm = 0.5*(ih2**1.5)/lpi
+            fNorm = 0.5*(ih2**1.5)*ipi
             for j in range(num_neighbors):
                 pj = tags_temp[j]
                 r2 = dist_temp[j] * ih2
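
A micro-optimization in the density hunk above is easy to miss: the division
by pi in the per-particle normalization is replaced by multiplication with a
precomputed reciprocal (ipi = 1.0/np.pi), hoisted out of the loop, since
floating-point division is markedly slower than multiplication. A small
runnable check (with a hypothetical smoothing value) that the reciprocal form
is numerically equivalent at this precision:

    import numpy as np

    ih2 = 4.0 / 0.25             # hypothetical smoothing value
    ipi = 1.0 / np.pi            # hoisted out of the per-particle loop
    assert abs(0.5 * ih2**1.5 / np.pi - 0.5 * ih2**1.5 * ipi) < 1e-12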



https://bitbucket.org/yt_analysis/yt/changeset/995fd6c8ef5b/
changeset:   995fd6c8ef5b
branch:      yt
user:        MatthewTurk
date:        2011-11-23 19:16:22
summary:     Merging latest changes
affected #:  91 files

diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -8,6 +8,7 @@
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
+yt/utilities/spatial/ckdtree.c
 syntax: glob
 *.pyc
 .*.swp


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -21,6 +21,9 @@
    be "na.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
+ * When writing a subclass, use the super built-in to access the super class,
+   rather than explicitly. Ex: "super(SpecialGrid, self).__init__()" rather than
+   "SpecialGrid.__init__()".
  * Doc strings should describe input, output, behavior, and any state changes
    that occur on an object.  See the file `doc/docstring_example.txt` for a
    fiducial example of a docstring.
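
A runnable illustration of the new super guideline (class names hypothetical).
Note the intended contrast is with hard-coding the parent class, e.g.
Grid.__init__(self); calling SpecialGrid.__init__() from inside SpecialGrid
itself would recurse:

    class Grid(object):
        def __init__(self):
            self.initialized = True

    class SpecialGrid(Grid):
        def __init__(self):
            # Preferred: resolve the parent through the MRO.
            super(SpecialGrid, self).__init__()
            # Discouraged: Grid.__init__(self) hard-codes the parent.

    assert SpecialGrid().initialized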


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -268,12 +268,21 @@
     cd ..
 }
 
+if type -P wget &>/dev/null 
+then
+    echo "Using wget"
+    export GETFILE="wget -nv"
+else
+    echo "Using curl"
+    export GETFILE="curl -sSO"
+fi
+
 function get_enzotools
 {
     echo "Downloading $1 from yt-project.org"
     [ -e $1 ] && return
-    wget -nv "http://yt-project.org/dependencies/$1" || do_exit
-    wget -nv "http://yt-project.org/dependencies/$1.md5" || do_exit
+    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+    ${GETFILE} "http://yt-project.org/dependencies/$1.md5" || do_exit
     ( which md5sum &> /dev/null ) || return # return if we don't have md5sum
     ( md5sum -c $1.md5 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -7,12 +7,14 @@
 
 doc = """\
 
-Welcome to yt!
+==================
+| Welcome to yt! |
+==================
 
 """
 
 try:
-    import IPython.Shell
+    import IPython
 except:
     print 'ipython is not available. using default python interpreter.'
     import code
@@ -20,7 +22,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if "DISPLAY" in os.environ:
+if IPython.__version__.startswith("0.10"):
+    api_version = '0.10'
+elif IPython.__version__.startswith("0.11"):
+    api_version = '0.11'
+
+if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
     ipbackends = dict(Qt4 = IPython.Shell.IPShellMatplotlibQt4,
                       WX  = IPython.Shell.IPShellMatplotlibWX,
@@ -32,8 +39,15 @@
         ip_shell = ipbackends[bend](user_ns=namespace)
     except KeyError:
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+elif api_version == "0.10":
+    ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+elif api_version == "0.11":
+    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+    ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
+                    display_banner = True)
+    if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
 else:
-    ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
+    raise RuntimeError
 
 # The rest is a modified version of the IPython default profile code
 
@@ -58,220 +72,22 @@
 # Most of your config files and extensions will probably start with this import
 
 #import IPython.ipapi
-ip = ip_shell.IP.getapi()
+if api_version == "0.10":
+    ip = ip_shell.IP.getapi()
+    try_next = IPython.ipapi.TryNext
+    kwargs = dict(sys_exit=1, banner=doc)
+elif api_version == "0.11":
+    ip = ip_shell
+    try_next = IPython.core.error.TryNext
+    kwargs = dict()
 
-# You probably want to uncomment this if you did %upgrade -nolegacy
-# import ipy_defaults    
-
-import os   
-import glob
-import itertools
-
-def main():   
-
-    # uncomment if you want to get ipython -p sh behaviour
-    # without having to use command line switches  
-    # import ipy_profile_sh
-
-    # Configure your favourite editor?
-    # Good idea e.g. for %edit os.path.isfile
-
-    #import ipy_editors
-    
-    # Choose one of these:
-    
-    #ipy_editors.scite()
-    #ipy_editors.scite('c:/opt/scite/scite.exe')
-    #ipy_editors.komodo()
-    #ipy_editors.idle()
-    # ... or many others, try 'ipy_editors??' after import to see them
-    
-    # Or roll your own:
-    #ipy_editors.install_editor("c:/opt/jed +$line $file")
-    
-    
-    o = ip.options
-    # An example on how to set options
-    #o.autocall = 1
-    o.system_verbose = 0
-    
-    #import_all("os sys")
-    #execf('~/_ipython/ns.py')
-
-
-    # -- prompt
-    # A different, more compact set of prompts from the default ones, that
-    # always show your current location in the filesystem:
-
-    #o.prompt_in1 = r'\C_LightBlue[\C_LightCyan\Y2\C_LightBlue]\C_Normal\n\C_Green|\#>'
-    #o.prompt_in2 = r'.\D: '
-    #o.prompt_out = r'[\#] '
-    
-    # Try one of these color settings if you can't read the text easily
-    # autoexec is a list of IPython commands to execute on startup
-    #o.autoexec.append('%colors LightBG')
-    #o.autoexec.append('%colors NoColor')
-    #o.autoexec.append('%colors Linux')
-    
-    # for sane integer division that converts to float (1/2 == 0.5)
-    #o.autoexec.append('from __future__ import division')
-    
-    # For %tasks and %kill
-    #import jobctrl 
-    
-    # For autoreloading of modules (%autoreload, %aimport)    
-    #import ipy_autoreload
-    
-    # For winpdb support (%wdb)
-    #import ipy_winpdb
-    
-    # For bzr completer, requires bzrlib (the python installation of bzr)
-    #ip.load('ipy_bzr')
-    
-    # Tab completer that is not quite so picky (i.e. 
-    # "foo".<TAB> and str(2).<TAB> will work). Complete 
-    # at your own risk!
-    #import ipy_greedycompleter
-    
-from UserDict import UserDict
-class ParameterFileDict(UserDict):
-    def __init__(self):
-        # We accept no contributions
-        UserDict.__init__(self)
-        self._key_numbers = {}
-        self._nn = 0
-    def __setitem__(self, key, value):
-        if isinstance(key, int): raise KeyError
-        UserDict.__setitem__(self, key, value)
-        self._key_numbers[self._nn] = key
-        self._nn += 1
-    def __getitem__(self, key):
-        if isinstance(key, int):
-            return self[self._key_numbers[key]]
-        return UserDict.__getitem__(self, key)
-    def __iter__(self):
-        return itertools.chain(self.field_data.iterkeys(),
-                        self._key_numbers.iterkeys())
-    def __repr__(self):
-        s = "{" + ", \n ".join(
-                [" '(%s, %s)': %s" % (i, self._key_numbers[i], self[i])
-                    for i in sorted(self._key_numbers)]) + "}"
-        return s
-    def has_key(self, key):
-        return self.field_data.has_key(key) or self._key_numbers.has_key(key)
-    def keys(self):
-        return self.field_data.key(key) + self._key_numbers.key(key)
-
-pfs = ParameterFileDict()
-pcs = []
-ip.user_ns['pf'] = None
-ip.user_ns['pfs'] = pfs
-ip.user_ns['pc'] = None
-ip.user_ns['pcs'] = pcs
 ip.ex("from yt.mods import *")
 
-def do_pfall(self, arg):
-    if arg.strip() == "": arg = 0
-    for i in range(int(arg)+1):
-        for f in sorted(glob.glob("".join(["*/"]*i) + "*.hierarchy" )):
-            #print i, f
-            fn = f[:-10]
-            # Make this a bit smarter
-            ip.user_ns['pfs'][fn] = EnzoStaticOutput(fn)
-    ip.ex("print pfs")
-
-ip.expose_magic("pfall", do_pfall)
-
-def _parse_pf(arg):
-    if arg.strip() == "":
-        if ip.user_ns.get('pf', None) is not None:
-            return ip.user_ns['pf']
-        elif len(pfs) > 0:
-            return pfs[0]
-    else:
-        if pfs.has_key(arg):
-            return pfs[arg]
-        if pfs.has_key(int(arg)):
-            return pfs[int(arg)]
-        return EnzoStaticOutput(arg)
-    raise KeyError
-        
-def do_slice(self, arg):
-    pc = None
-    if len(arg.split()) == 3:
-        pfn, field, axis = arg.split()
-        pf = _parse_pf(arg.split()[0])
-    elif len(arg.split()) == 2:
-        field, axis = arg.split()
-        pf = _parse_pf("")
-        if ip.user_ns.get('pc', None) is not None and \
-           ip.user_ns['pc'].parameter_file is pf:
-            pf = ip.user_ns['pc']
-    else:
-        print "Need either two or three arguments."
-        return
-    axis = int(axis)
-    if pc is None: pc = PlotCollectionInteractive(pf)
-    pc.add_slice(field, axis)
-    print "Setting pcs[%s] = New PlotCollection" % len(pcs)
-    ip.user_ns['pcs'].append(pc)
-    if ip.user_ns.get('pc', None) is None: ip.user_ns['pc'] = pc
-    return pc
-
-ip.expose_magic("pcslicer", do_slice)
-
-def do_width(self, arg):
-    if ip.user_ns.get("pc", None) is None:
-        print "No 'pc' defined"
-        return
-    if len(arg.split()) == 2:
-        w, u = arg.split()
-    else:
-        w, u = arg, '1'
-    ip.user_ns['pc'].set_width(float(w), u)
-ip.expose_magic("width", do_width)
-
-def do_zoom(self, arg):
-    if ip.user_ns.get("pc", None) is None:
-        print "No 'pc' defined"
-        return
-    pc = ip.user_ns['pc']
-    w = None
-    for p in pc:
-        if hasattr(p, 'width'): w = p.width
-    if w is None: print "No zoomable plots defined"
-    w /= float(arg)
-    pc.set_width(w, '1')
-ip.expose_magic("zoom", do_zoom)
-    
-def do_setup_pf(self, arg):
-    if pfs.has_key(arg): ip.user_ns['pf'] = pfs[arg]
-    iarg = -1
-    try:
-        iarg = int(arg)
-    except ValueError: pass
-    if pfs.has_key(iarg): ip.user_ns['pf'] = pfs[iarg]
-    print ip.user_ns['pf']
-    
-ip.expose_magic("gpf", do_setup_pf)
-
-# some config helper functions you can use 
-def import_all(modules):
-    """ Usage: import_all("os sys") """ 
-    for m in modules.split():
-        ip.ex("from %s import *" % m)
-        
-def execf(fname):
-    """ Execute a file in user namespace """
-    ip.ex('execfile("%s")' % os.path.expanduser(fname))
-
-#main()
-
-
 # Now we add some tab completers, in the vein of:
 # http://pymel.googlecode.com/svn/trunk/tools/ipymel.py
 # We'll start with some fields.
 
+import re
 def yt_fieldname_completer(self, event):
     """Match dictionary completions"""
     #print "python_matches", event.symbol
@@ -284,7 +100,7 @@
     m = re.match(r"(\S+(\.\w+)*)\[[\'\\\"](\w*)$", text)
 
     if not m:
-        raise IPython.ipapi.TryNext 
+        raise try_next
     
     expr, attr = m.group(1, 3)
     #print "COMPLETING ON ", expr, attr
@@ -308,8 +124,8 @@
         return all_fields
 
 
-    raise IPython.ipapi.TryNext 
+    raise try_next
 
 ip.set_hook('complete_command', yt_fieldname_completer , re_key = ".*" )
 
-ip_shell.mainloop(sys_exit=1,banner=doc)
+ip_shell.mainloop(**kwargs)


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d setup.py
--- a/setup.py
+++ b/setup.py
@@ -20,15 +20,10 @@
 try:
     import Cython
 except ImportError as e:
-    print "Received error on importing Cython:"
-    print e
-    print "Now attempting to install Cython"
-    import pip
-    rv = pip.main(["install",
-              "http://yt-project.org/dependencies/Cython-latest.tar.gz"])
-    if rv == 1:
-        print "Unable to install Cython.  Please report this bug to yt-users."
-        sys.exit(1)
+    print "Cython is a build-time requirement for the source tree of yt."
+    print "Please either install yt from a provided, release tarball,"
+    print "or install Cython (version 0.15 or higher)."
+    sys.exit(1)
 
 ######
 # This next bit comes from Matthew Brett, to get Cython working with NumPy


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d tests/object_field_values.py
--- a/tests/object_field_values.py
+++ b/tests/object_field_values.py
@@ -73,12 +73,42 @@
         YTStaticOutputTest.setup(self)
         known_objects[self.object_name](self)
 
+class YTExtractIsocontoursTest(YTFieldValuesTest):
+    def run(self):
+        val = self.data_object.quantities["WeightedAverageQuantity"](
+            "Density", "Density")
+        rset = self.data_object.extract_isocontours("Density",
+            val, rescale = False, sample_values = "Temperature")
+        self.result = rset
+
+    def compare(self, old_result):
+        if self.result[0].size == 0 and old_result[0].size == 0:
+            return True
+        self.compare_array_delta(self.result[0].ravel(),
+                                 old_result[0].ravel(), 1e-7)
+        self.compare_array_delta(self.result[1], old_result[1], 1e-7)
+
+class YTIsocontourFluxTest(YTFieldValuesTest):
+    def run(self):
+        val = self.data_object.quantities["WeightedAverageQuantity"](
+            "Density", "Density")
+        flux = self.data_object.calculate_isocontour_flux(
+           "Density", val, "x-velocity", "y-velocity", "z-velocity")
+        self.result = flux
+
+    def compare(self, old_result):
+        self.compare_value_delta(self.result, old_result, 1e-7)
+
 for object_name in known_objects:
     for field in field_list + particle_field_list:
         if "cut_region" in object_name and field in particle_field_list:
             continue
         create_test(YTFieldValuesTest, "%s_%s" % (object_name, field),
                     field = field, object_name = object_name)
+    create_test(YTExtractIsocontoursTest, "%s" % (object_name),
+                object_name = object_name)
+    create_test(YTIsocontourFluxTest, "%s" % (object_name),
+                object_name = object_name)
     
 class YTDerivedQuantityTest(YTStaticOutputTest):
     def setup(self):
@@ -140,4 +170,3 @@
                     "%s_%s" % (object_name, field),
                     field_name = field, 
                     object_name = object_name)
-


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -1,7 +1,7 @@
 import matplotlib; matplotlib.use('Agg')
 from yt.config import ytcfg
-ytcfg["yt","loglevel"] = "50"
-ytcfg["yt","serialize"] = "False"
+ytcfg["yt", "loglevel"] = "50"
+ytcfg["yt", "serialize"] = "False"
 
 from yt.utilities.answer_testing.api import \
     RegressionTestRunner, clear_registry, create_test, \
@@ -58,23 +58,23 @@
         my_hash = "UNKNOWN%s" % (time.time())
     parser = optparse.OptionParser()
     parser.add_option("-f", "--parameter-file", dest="parameter_file",
-                      default = os.path.join(cwd, "DD0010/moving7_0010"),
-                      help = "The parameter file value to feed to 'load' to test against",
-                      )
+                      default=os.path.join(cwd, "DD0010/moving7_0010"),
+                      help="The parameter file value to feed to 'load' to test against")
     parser.add_option("-l", "--list", dest="list_tests", action="store_true",
-                      default = False, help = "List all tests and then exit")
+                      default=False, help="List all tests and then exit")
     parser.add_option("-t", "--tests", dest="test_pattern", default="*",
-                      help = "The test name pattern to match.  Can include wildcards.")
+                      help="The test name pattern to match.  Can include wildcards.")
     parser.add_option("-o", "--output", dest="storage_dir",
                       default=test_storage_directory,
-                      help = "Base directory for storing test output.")
+                      help="Base directory for storing test output.")
     parser.add_option("-c", "--compare", dest="compare_name",
                       default=None,
-                      help = "The name against which we will compare")
+                      help="The name against which we will compare")
     parser.add_option("-n", "--name", dest="this_name",
                       default=my_hash,
-                      help = "The name we'll call this set of tests")
+                      help="The name we'll call this set of tests")
     opts, args = parser.parse_args()
+
     if opts.list_tests:
         tests_to_run = []
         for m, vals in mapping.items():
@@ -86,10 +86,13 @@
         tests = list(set(tests_to_run))
         print "\n    ".join(tests)
         sys.exit(0)
+
+    # Load the test pf and make sure it's good.
     pf = load(opts.parameter_file)
     if pf is None:
         print "Couldn't load the specified parameter file."
         sys.exit(1)
+
     # Now we modify our compare name and self name to include the pf.
     compare_id = opts.compare_name
     watcher = None
@@ -97,14 +100,17 @@
         compare_id += "_%s_%s" % (pf, pf._hash())
         watcher = Xunit()
     this_id = opts.this_name + "_%s_%s" % (pf, pf._hash())
+
     rtr = RegressionTestRunner(this_id, compare_id,
-            results_path = opts.storage_dir,
-            compare_results_path = opts.storage_dir,
-            io_log = [opts.parameter_file])
+                               results_path=opts.storage_dir,
+                               compare_results_path=opts.storage_dir,
+                               io_log=[opts.parameter_file])
+
     rtr.watcher = watcher
     tests_to_run = []
     for m, vals in mapping.items():
         new_tests = fnmatch.filter(vals, opts.test_pattern)
+
         if len(new_tests) == 0: continue
         load_tests(m, cwd)
         keys = set(registry_entries())


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1323,6 +1323,7 @@
     _name = "Loaded"
     
     def __init__(self, pf, basename):
+        ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self._groups = []
         self.basename = basename


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -31,7 +31,7 @@
 from yt.funcs import *
 
 from yt.analysis_modules.halo_finding.halo_objects import \
-    FOFHaloFinder, HaloFinder
+    FOFHaloFinder, HaloFinder, parallelHF
 from yt.analysis_modules.halo_profiler.multi_halo_profiler import \
     HaloProfiler
 from yt.convenience import load
@@ -464,6 +464,7 @@
                         parent_masses = na.concatenate((parent_masses, thisMasses))
                         parent_halos = na.concatenate((parent_halos, 
                             na.ones(thisIDs.size, dtype='int32') * gID))
+                        del thisIDs, thisMasses
                     h5fp.close()
             
             # Sort the arrays by particle index in ascending order.
@@ -495,6 +496,7 @@
                     child_masses = na.concatenate((child_masses, thisMasses))
                     child_halos = na.concatenate((child_halos, 
                         na.ones(thisIDs.size, dtype='int32') * gID))
+                    del thisIDs, thisMasses
                 h5fp.close()
         
         # Sort the arrays by particle index.
@@ -548,6 +550,7 @@
         parent_halos_tosend = parent_halos[parent_send]
         child_IDs_tosend = child_IDs[child_send]
         child_halos_tosend = child_halos[child_send]
+        del parent_send, child_send
         
         parent_IDs_tosend = self.comm.par_combine_object(parent_IDs_tosend,
                 datatype="array", op="cat")
@@ -651,6 +654,11 @@
             #values = tuple(values)
             self.write_values.append(values)
             self.write_values_dict[parent_currt][parent_halo] = values
+
+        # Clean up.
+        del parent_IDs, parent_masses, parent_halos
+        del parent_IDs_tosend, parent_masses_tosend
+        del parent_halos_tosend, child_IDs_tosend, child_halos_tosend
         
         return (child_IDs, child_masses, child_halos)
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -35,6 +35,7 @@
                  function=None, clump_info=None):
         self.parent = parent
         self.data = data
+        self.quantities = data.quantities
         self.field = field
         self.min_val = self.data[field].min()
         self.max_val = self.data[field].max()
@@ -167,6 +168,7 @@
     # unreliable in the unpickling
     for child in children: child.parent = obj
     obj.data = data[1] # Strip out the PF
+    obj.quantities = obj.data.quantities
     if obj.parent is None: return (data[0], obj)
     return obj
 

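The clump changes cache `data.quantities` on the object and re-derive
it when reconstructing from a pickle: an attribute that merely aliases
another object's state is cheaper to rebuild than to serialize. One
standard way to express the same idea, with illustrative classes
rather than yt's reconstruction helper:

    import pickle

    class Data(object):
        def __init__(self):
            self.quantities = {"TotalQuantity": "callable placeholder"}

    class Clump(object):
        def __init__(self, data):
            self.data = data
            self.quantities = data.quantities  # alias, not independent state
        def __getstate__(self):
            state = self.__dict__.copy()
            state.pop("quantities")  # do not serialize the alias
            return state
        def __setstate__(self, state):
            self.__dict__.update(state)
            self.quantities = self.data.quantities  # re-derive the alias

    c = pickle.loads(pickle.dumps(Clump(Data())))
    print c.quantities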

diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/analysis_modules/light_cone/light_cone.py
--- a/yt/analysis_modules/light_cone/light_cone.py
+++ b/yt/analysis_modules/light_cone/light_cone.py
@@ -42,7 +42,7 @@
 from .light_cone_projection import _light_cone_projection
 
 class LightCone(EnzoSimulation):
-    def __init__(self, EnzoParameterFile, initial_redshift=1.0, 
+    def __init__(self, enzo_parameter_file, initial_redshift=1.0, 
                  final_redshift=0.0, observer_redshift=0.0,
                  field_of_view_in_arcminutes=600.0, image_resolution_in_arcseconds=60.0, 
                  use_minimum_datasets=True, deltaz_min=0.0, minimum_coherent_box_fraction=0.0,
@@ -100,7 +100,7 @@
         self.recycleRandomSeed = 0
 
         # Initialize EnzoSimulation machinery for getting dataset list.
-        EnzoSimulation.__init__(self, EnzoParameterFile, initial_redshift=self.initial_redshift,
+        EnzoSimulation.__init__(self, enzo_parameter_file, initial_redshift=self.initial_redshift,
                                 final_redshift=self.final_redshift, links=True,
                                 enzo_parameters={'CosmologyComovingBoxSize':float}, **kwargs)
 
@@ -513,7 +513,7 @@
         else:
             f.write("Original Solution\n")
             f.write("OriginalRandomSeed = %s\n" % self.originalRandomSeed)
-        f.write("EnzoParameterFile = %s\n" % self.EnzoParameterFile)
+        f.write("enzo_parameter_file = %s\n" % self.enzo_parameter_file)
         f.write("\n")
         for q, output in enumerate(self.light_cone_solution):
             f.write("Proj %04d, %s, z = %f, depth/box = %f, width/box = %f, axis = %d, center = %f, %f, %f\n" %


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -42,7 +42,7 @@
     __global_parallel_size = '1',
     __topcomm_parallel_rank = '0',
     __topcomm_parallel_size = '1',
-    storeparameterfiles = 'True',
+    storeparameterfiles = 'False',
     parameterfilestore = 'parameter_files.csv',
     maximumstoredpfs = '500',
     loadfieldplugins = 'True',
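
With the default flipped to 'False', parameter-file storage becomes
opt-in. Users who relied on it can turn it back on in their yt
configuration; assuming ytcfg keeps its ConfigParser-style interface,
a runtime override would look like this:

    # Re-enable parameter-file storage for this session only.
    # The set() call assumes ytcfg is ConfigParser-like; verify
    # against your yt version.
    from yt.config import ytcfg
    ytcfg.set("yt", "storeparameterfiles", "True")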


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -68,7 +68,6 @@
 from field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \
-    CodeFieldInfoContainer, \
     NeedsGridType, \
     NeedsOriginalGrid, \
     NeedsDataField, \


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -1622,11 +1622,12 @@
         # It is probably faster, as it consolidates IO, but if we did it in
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
+            dependencies = self.get_dependencies(fields, ghost_zones = False)
             print "Preloading %s grids and getting %s" % (
                     len(self.source._get_grid_objs()),
-                    self.get_dependencies(fields))
+                    dependencies)
             self.comm.preload([g for g in self._get_grid_objs()],
-                          self.get_dependencies(fields), self.hierarchy.io)
+                          dependencies, self.hierarchy.io)
         # By changing the remove-from-tree method to accumulate, we can avoid
         # having to do this by level, and instead do it by CPU file
         for level in range(0, self._max_level+1):
@@ -2256,7 +2257,7 @@
         return dls
 
     def _get_data_from_grid(self, grid, fields, dls):
-        g_fields = [grid[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
         FillBuffer(ref_ratio,
@@ -2266,7 +2267,7 @@
             grid.child_mask, self.domain_width, dls[grid.Level],
             self.axis)
 
-class AMR3DData(AMRData, GridPropertiesMixin):
+class AMR3DData(AMRData, GridPropertiesMixin, ParallelAnalysisInterface):
     _key_fields = ['x','y','z','dx','dy','dz']
     """
     Class describing a cluster of data points, not necessarily sharing any
@@ -2280,6 +2281,7 @@
         used as a base class.  Note that *center* is supplied, but only used
         for fields and quantities that require it.
         """
+        ParallelAnalysisInterface.__init__(self)
         AMRData.__init__(self, pf, fields, **kwargs)
         self._set_center(center)
         self.coords = None
@@ -2364,11 +2366,14 @@
             f = grid[field]
             return na.array([f[i,:][pointI] for i in range(3)])
         else:
+            tr = grid[field]
+            if tr.size == 1: # dx, dy, dz, cellvolume
+                tr = tr * na.ones(grid.ActiveDimensions, dtype='float64')
+            if len(grid.Children) == 0 and grid.OverlappingSiblings is None \
+                and self._is_fully_enclosed(grid):
+                return tr.ravel()
             pointI = self._get_point_indices(grid)
-            if grid[field].size == 1: # dx, dy, dz, cellvolume
-                t = grid[field] * na.ones(grid.ActiveDimensions, dtype='float64')
-                return t[pointI].ravel()
-            return grid[field][pointI].ravel()
+            return tr[pointI].ravel()
 
     def _flush_data_to_grids(self, field, default_val, dtype='float32'):
         """
@@ -2479,12 +2484,19 @@
             format.  Suitable for loading into meshlab.
         rescale : bool, optional
             If true, the vertices will be rescaled within their min/max.
+        sample_values : string, optional
+            Any field whose value should be extracted at the center of each
+            triangle.
 
         Returns
         -------
         verts : array of floats
             The array of vertices, x,y,z.  Taken in threes, these are the
             triangle vertices.
+        samples : array of floats
+            If `sample_values` is specified, this will be returned and will
+            contain the values of the field specified at the center of each
+            triangle.
 
         References
         ----------
@@ -2504,9 +2516,7 @@
         """
         verts = []
         samples = []
-        pb = get_pbar("Extracting Isocontours", len(self._grids))
-        for i, g in enumerate(self._grids):
-            pb.update(i)
+        for i, g in enumerate(self._get_grid_objs()):
             mask = self._get_cut_mask(g) * g.child_mask
             vals = g.get_vertex_centered_data(field)
             if sample_values is not None:
@@ -2519,20 +2529,24 @@
                 my_verts, svals = my_verts
                 samples.append(svals)
             verts.append(my_verts)
-        pb.finish()
-        verts = na.concatenate(verts)
+        verts = na.concatenate(verts).transpose()
+        verts = self.comm.par_combine_object(verts, op='cat', datatype='array')
+        verts = verts.transpose()
         if sample_values is not None:
             samples = na.concatenate(samples)
+            samples = self.comm.par_combine_object(samples, op='cat',
+                                datatype='array')
         if rescale:
             mi = na.min(verts, axis=0)
             ma = na.max(verts, axis=0)
             verts = (verts - mi) / (ma - mi).max()
-        if filename is not None:
+        if filename is not None and self.comm.rank == 0:
             f = open(filename, "w")
             for v1 in verts:
                 f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2]))
             for i in range(len(verts)/3):
                 f.write("f %s %s %s\n" % (i*3+1, i*3+2, i*3+3))
+            f.close()
         if sample_values is not None:
             return verts, samples
         return verts
@@ -2602,7 +2616,7 @@
         ...     "x-velocity", "y-velocity", "z-velocity", "Metal_Density")
         """
         flux = 0.0
-        for g in self._grids:
+        for g in self._get_grid_objs():
             mask = self._get_cut_mask(g) * g.child_mask
             vals = g.get_vertex_centered_data(field)
             if fluxing_field is None:
@@ -2613,6 +2627,7 @@
                          [field_x, field_y, field_z]]
             flux += march_cubes_grid_flux(value, vals, xv, yv, zv,
                         ff, mask, g.LeftEdge, g.dds)
+        flux = self.comm.mpi_allreduce(flux, op="sum")
         return flux
 
     def extract_connected_sets(self, field, num_levels, min_val, max_val,
@@ -2668,6 +2683,18 @@
                 particle_handler_registry[self._type_name](self.pf, self)
         return self._particle_handler
 
+
+    def volume(self, unit = "unitary"):
+        """
+        Return the volume of the data container in units *unit*.
+        This is found by adding up the volumes of the cells whose
+        centers lie inside the container, rather than by using the
+        container's geometric shape, so it may differ slightly from
+        the expected geometric volume.
+        """
+        return self.quantities["TotalQuantity"]("CellVolume")[0] * \
+            (self.pf[unit] / self.pf['cm']) ** 3.0
+
 class ExtractedRegionBase(AMR3DData):
     """
     ExtractedRegions are arbitrarily defined containers of data, useful
@@ -2676,6 +2703,31 @@
     _type_name = "extracted_region"
     _con_args = ('_base_region', '_indices')
     def __init__(self, base_region, indices, force_refresh=True, **kwargs):
+        """An arbitrarily defined data container that allows for selection
+        of all data meeting certain criteria.
+
+        In order to create an arbitrarily selected set of data, the
+        ExtractedRegion takes a `base_region` and a set of `indices`
+        and creates a region within the `base_region` consisting of
+        all data indexed by the `indices`. Note that `indices` must be
+        precomputed. This does not work well for parallelized
+        operations.
+
+        Parameters
+        ----------
+        base_region : yt data source
+            A previously selected data source.
+        indices : array_like
+            An array of indices
+
+        Other Parameters
+        ----------------
+        force_refresh : bool
+           Force a refresh of the data. Defaults to True.
+        """
         cen = kwargs.pop("center", None)
         if cen is None: cen = base_region.get_field_parameter("center")
         AMR3DData.__init__(self, center=cen,
@@ -2959,10 +3011,22 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data with an arbitrary center.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. If the selected region extends past the edges
+        of the domain, no data will be found there, though neither the
+        object's `left_edge` nor its `right_edge` is modified.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = left_edge
@@ -2991,15 +3055,6 @@
                  & (grid['z'] + dzp > self.left_edge[2]) )
         return cm
 
-    def volume(self, unit = "unitary"):
-        """
-        Return the volume of the region in units *unit*.
-        """
-        diff = na.array(self.right_edge) - na.array(self.left_edge)
-        # Find the full volume
-        vol = na.prod(diff * self.pf[unit])
-        return vol
-
 class AMRRegionStrictBase(AMRRegionBase):
     """
     AMRRegion without any dx padding for cell selection
@@ -3016,10 +3071,25 @@
     _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
-        """
-        We create an object with a set of three *left_edge* coordinates,
-        three *right_edge* coordinates, and a *center* that need not be the
-        center.
+        """A 3D region of data that with periodic boundary
+        conditions if the selected region extends beyond the
+        simulation domain.
+
+        Takes an array of three *left_edge* coordinates, three
+        *right_edge* coordinates, and a *center* that can be anywhere
+        in the domain. The selected region can extend past the edges
+        of the domain, in which case periodic boundary conditions will
+        be applied to fill the region.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the region
+        left_edge : array_like
+            The left edge of the region
+        right_edge : array_like
+            The right edge of the region
+
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3061,27 +3131,21 @@
                           & (grid['z'] + dzp + off_z > self.left_edge[2]) )
             return cm
 
-    def volume(self, unit = "unitary"):
-        """
-        Return the volume of the region in units *unit*.
-        """
-        period = self.pf.domain_right_edge - self.pf.domain_left_edge
-        diff = na.array(self.right_edge) - na.array(self.left_edge)
-        # Correct for wrap-arounds.
-        tofix = (diff < 0)
-        toadd = period[tofix]
-        diff += toadd
-        # Find the full volume
-        vol = na.prod(diff * self.pf[unit])
-        return vol
-        
-
 class AMRPeriodicRegionStrictBase(AMRPeriodicRegionBase):
     """
     AMRPeriodicRegion without any dx padding for cell selection
     """
     _type_name = "periodic_region_strict"
     _dx_pad = 0.0
+    def __init__(self, center, left_edge, right_edge, fields = None,
+                 pf = None, **kwargs):
+        """same as periodic region, but does not include cells unless
+        the selected region encompasses their centers.
+
+        """
+        AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge, 
+                                       fields = fields, pf = pf, **kwargs)
+    
 
 class AMRGridCollectionBase(AMR3DData):
     """
@@ -3123,9 +3187,20 @@
     _type_name = "sphere"
     _con_args = ('center', 'radius')
     def __init__(self, center, radius, fields = None, pf = None, **kwargs):
-        """
-        The most famous of all the data objects, we define it via a
-        *center* and a *radius*.
+        """A sphere f points defined by a *center* and a *radius*.
+
+        Parameters
+        ----------
+        center : array_like
+            The center of the sphere.
+        radius : float
+            The radius of the sphere.
+
+        Examples
+        --------
+        >>> pf = load("DD0010/moving7_0010")
+        >>> c = [0.5,0.5,0.5]
+        >>> sphere = pf.h.sphere(c,1.*pf['kpc'])
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         # Unpack the radius, if necessary
@@ -3167,18 +3242,25 @@
             self._cut_masks[grid.id] = cm
         return cm
 
-    def volume(self, unit = "unitary"):
-        """
-        Return the volume of the sphere in units *unit*.
-        """
-        return 4./3. * math.pi * (self.radius * self.pf[unit])**3.0
-
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
     _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
     def __init__(self, level, left_edge, dims, fields = None,
                  pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
+        """A 3D region with all data extracted to a single, specified
+        resolution.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level at which the data is uniformly gridded
+        left_edge : array_like
+            The left edge of the region to be extracted
+        dims : array_like
+            Number of cells along each axis of the extracted region
+
+        """
         AMR3DData.__init__(self, center=kwargs.pop("center", None),
                            fields=fields, pf=pf, **kwargs)
         self.left_edge = na.array(left_edge)
@@ -3276,7 +3358,7 @@
     def _get_data_from_grid(self, grid, fields):
         ll = int(grid.Level == self.level)
         ref_ratio = self.pf.refine_by**(self.level - grid.Level)
-        g_fields = [grid[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
         c_fields = [self[field] for field in fields]
         count = FillRegion(ref_ratio,
             grid.get_global_startindex(), self.global_startindex,
@@ -3312,6 +3394,24 @@
     _type_name = "smoothed_covering_grid"
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
+        """A 3D region with all data extracted and interpolated to a
+        single, specified resolution.
+
+        Smoothed covering grids start at level 0, interpolating to
+        fill the region to level 1, replacing any cells actually
+        covered by level 1 data, and then recursively repeating this
+        process until it reaches the specified `level`.
+        
+        Parameters
+        ----------
+        level : int
+            The resolution level at which the data is uniformly gridded
+        left_edge : array_like
+            The left edge of the region to be extracted
+        dims : array_like
+            Number of cells along each axis of the extracted region
+
+        """
         self._base_dx = (
               (self.pf.domain_right_edge - self.pf.domain_left_edge) /
                self.pf.domain_dimensions.astype("float64"))
@@ -3333,39 +3433,36 @@
             fields_to_get = self.fields[:]
         else:
             fields_to_get = ensure_list(field)
-        for field in fields_to_get:
-            grid_count = 0
-            if self.field_data.has_key(field):
-                continue
-            mylog.debug("Getting field %s from %s possible grids",
-                       field, len(self._grids))
-            if self._use_pbar: pbar = \
-                    get_pbar('Searching grids for values ', len(self._grids))
-            # Note that, thanks to some trickery, we have different dimensions
-            # on the field than one might think from looking at the dx and the
-            # L/R edges.
-            # We jump-start our task here
-            self._update_level_state(0, field)
-            
-            # The grids are assumed to be pre-sorted
-            last_level = 0
-            for gi, grid in enumerate(self._grids):
-                if self._use_pbar: pbar.update(gi)
-                if grid.Level > last_level and grid.Level <= self.level:
-                    self._update_level_state(last_level + 1)
-                    self._refine(1, field)
-                    last_level = grid.Level
-                self._get_data_from_grid(grid, field)
-            if self.level > 0:
+        fields_to_get = [f for f in fields_to_get if f not in self.field_data]
+        # Note that, thanks to some trickery, we have different dimensions
+        # on the field than one might think from looking at the dx and the
+        # L/R edges.
+        # We jump-start our task here
+        mylog.debug("Getting fields %s from %s possible grids",
+                   fields_to_get, len(self._grids))
+        self._update_level_state(0, fields_to_get)
+        if self._use_pbar: pbar = \
+                get_pbar('Searching grids for values ', len(self._grids))
+        # The grids are assumed to be pre-sorted
+        last_level = 0
+        for gi, grid in enumerate(self._grids):
+            if self._use_pbar: pbar.update(gi)
+            if grid.Level > last_level and grid.Level <= self.level:
+                self._update_level_state(last_level + 1)
+                self._refine(1, fields_to_get)
+                last_level = grid.Level
+            self._get_data_from_grid(grid, fields_to_get)
+        if self.level > 0:
+            for field in fields_to_get:
                 self[field] = self[field][1:-1,1:-1,1:-1]
-            if na.any(self[field] == -999):
-                # and self.dx < self.hierarchy.grids[0].dx:
-                n_bad = na.where(self[field]==-999)[0].size
-                mylog.error("Covering problem: %s cells are uncovered", n_bad)
-                raise KeyError(n_bad)
-            if self._use_pbar: pbar.finish()
-
-    def _update_level_state(self, level, field = None):
+                if na.any(self[field] == -999):
+                    # and self.dx < self.hierarchy.grids[0].dx:
+                    n_bad = (self[field]==-999).sum()
+                    mylog.error("Covering problem: %s cells are uncovered", n_bad)
+                    raise KeyError(n_bad)
+        if self._use_pbar: pbar.finish()
+
+    def _update_level_state(self, level, fields = None):
         dx = self._base_dx / self.pf.refine_by**level
         self.field_data['cdx'] = dx[0]
         self.field_data['cdy'] = dx[1]
@@ -3378,16 +3475,20 @@
         if level == 0 and self.level > 0:
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
-            self.field_data[field] = na.zeros(idims,dtype='float64')-999
+            fields = ensure_list(fields)
+            for field in fields:
+                self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
-            self.field_data[field] = na.zeros(idims,dtype='float64')-999
+            fields = ensure_list(fields)
+            for field in fields:
+                self.field_data[field] = na.zeros(idims,dtype='float64')-999
             self._cur_dims = idims.astype("int32")
 
-    def _refine(self, dlevel, field):
+    def _refine(self, dlevel, fields):
         rf = float(self.pf.refine_by**dlevel)
 
         input_left = (self._old_global_startindex + 0.5) * rf 
@@ -3396,16 +3497,17 @@
 
         self._cur_dims = output_dims
 
-        output_field = na.zeros(output_dims, dtype="float64")
-        output_left = self.global_startindex + 0.5
-        ghost_zone_interpolate(rf, self[field], input_left,
-                               output_field, output_left)
-        self[field] = output_field
+        for field in fields:
+            output_field = na.zeros(output_dims, dtype="float64")
+            output_left = self.global_startindex + 0.5
+            ghost_zone_interpolate(rf, self[field], input_left,
+                                   output_field, output_left)
+            self.field_data[field] = output_field
 
     def _get_data_from_grid(self, grid, fields):
         fields = ensure_list(fields)
-        g_fields = [grid[field] for field in fields]
-        c_fields = [self[field] for field in fields]
+        g_fields = [grid[field].astype("float64") for field in fields]
+        c_fields = [self.field_data[field] for field in fields]
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
@@ -3422,7 +3524,7 @@
     existing regions.
     """
     _type_name = "boolean"
-    _con_args = {"regions"}
+    _con_args = ("regions")
     def __init__(self, regions, fields = None, pf = None, **kwargs):
         """
         This will build a hybrid region based on the boolean logic
@@ -3455,6 +3557,7 @@
         self._cut_masks = {}
         self._get_all_regions()
         self._make_overlaps()
+        self._get_list_of_grids()
     
     def _get_all_regions(self):
         # Before anything, we simply find out which regions are involved in all
@@ -3559,7 +3662,6 @@
             self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask
 
-
 def _reconstruct_object(*args, **kwargs):
     pfid = args[0]
     dtype = args[1]
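
Among the data_containers changes, volume() now lives on AMR3DData and
is computed from the cells selected by the container rather than from
its geometric shape, so every 3D container gets it for free. A hedged
usage sketch (dataset path borrowed from the sphere docstring above;
the 'mpc' unit name is assumed to exist in the dataset's unit table):

    from yt.convenience import load

    pf = load("DD0010/moving7_0010")
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    # Cell-based volume in Mpc**3; this can differ slightly from
    # 4/3 * pi * r**3 because it counts cells whose centers fall
    # inside the sphere.
    print sp.volume("mpc")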


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -151,24 +151,11 @@
     """
     baryon_mass = data["CellMassMsun"].sum()
     particle_mass = data["ParticleMassMsun"].sum()
-    return baryon_mass, particle_mass
-def _combTotalMass(data, baryon_mass, particle_mass):
-    return baryon_mass.sum() + particle_mass.sum()
+    return [baryon_mass + particle_mass]
+def _combTotalMass(data, total_mass):
+    return total_mass.sum()
 add_quantity("TotalMass", function=_TotalMass,
-             combine_function=_combTotalMass, n_ret = 2)
-
-def _MatterMass(data):
-    """
-    This function takes no arguments and returns the array sum of cell masses
-    and particle masses.
-    """
-    cellvol = data["CellVolume"]
-    matter_rho = data["Matter_Density"]
-    return cellvol, matter_rho 
-def _combMatterMass(data, cellvol, matter_rho):
-    return cellvol*matter_rho
-add_quantity("MatterMass", function=_MatterMass,
-	     combine_function=_combMatterMass, n_ret=2)
+             combine_function=_combTotalMass, n_ret=1)
 
 def _CenterOfMass(data, use_cells=True, use_particles=False):
     """
@@ -358,7 +345,7 @@
     Add the mass contribution of particles if include_particles = True
     """
     if (include_particles):
-	mass_to_use = data.quantities["MatterMass"]()[0] 
+	mass_to_use = data["TotalMass"]
     else:
 	mass_to_use = data["CellMass"]
     kinetic = 0.5 * (mass_to_use * (
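
The TotalMass rework illustrates the derived-quantity contract: the
per-chunk function returns a list of partial values, those partials
are gathered into arrays, and the combine function reduces them. A
plain-Python stand-in for that flow (no parallelism, illustrative
field values, and the combine function's leading `data` argument
dropped for brevity):

    import numpy as na

    def _TotalMass(chunk):
        return [chunk["CellMassMsun"].sum() + chunk["ParticleMassMsun"].sum()]

    def _combTotalMass(total_mass):
        return total_mass.sum()

    chunks = [{"CellMassMsun": na.array([1.0, 2.0]),
               "ParticleMassMsun": na.array([0.5])},
              {"CellMassMsun": na.array([3.0]),
               "ParticleMassMsun": na.array([1.5])}]
    partials = na.array([_TotalMass(c)[0] for c in chunks])
    print _combTotalMass(partials)  # 8.0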


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -34,37 +34,17 @@
 
 from yt.funcs import *
 
-class FieldInfoContainer(object): # We are all Borg.
+class FieldInfoContainer(dict): # Resistance has utility
     """
     This is a generic field container.  It contains a list of potential derived
     fields, all of which know how to act on a data object and return a value.
     This object handles converting units as well as validating the availability
     of a given field.
+
     """
-    _shared_state = {}
-    _universal_field_list = {}
-    def __new__(cls, *args, **kwargs):
-        self = object.__new__(cls, *args, **kwargs)
-        self.__dict__ = cls._shared_state
-        return self
-    def __getitem__(self, key):
-        if key in self._universal_field_list:
-            return self._universal_field_list[key]
-        raise KeyError
-    def keys(self):
-        """ Return all the field names this object knows about. """
-        return self._universal_field_list.keys()
+    fallback = None
 
-    def __iter__(self):
-        return self._universal_field_list.iterkeys()
-
-    def __setitem__(self, key, val):
-        self._universal_field_list[key] = val
-
-    def has_key(self, key):
-        return key in self._universal_field_list
-
-    def add_field(self, name, function = None, **kwargs):
+    def add_field(self, name, function=None, **kwargs):
         """
         Add a new field, along with supplemental metadata, to the list of
         available fields.  This respects a number of arguments, all of which
@@ -79,6 +59,41 @@
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
 
+    def has_key(self, key):
+        # This gets used a lot
+        if key in self: return True
+        if self.fallback is None: return False
+        return self.fallback.has_key(key)
+
+    def __missing__(self, key):
+        if self.fallback is None:
+            raise KeyError("No field named %s" % key)
+        return self.fallback[key]
+
+    @classmethod
+    def create_with_fallback(cls, fallback):
+        obj = cls()
+        obj.fallback = fallback
+        return obj
+
+    def __contains__(self, key):
+        if dict.__contains__(self, key): return True
+        if self.fallback is None: return False
+        return self.fallback.has_key(key)
+
+    def __iter__(self):
+        for f in dict.__iter__(self): yield f
+        if self.fallback:
+            for f in self.fallback: yield f
+
+def TranslationFunc(field_name):
+    def _TranslationFunc(field, data):
+        return data[field_name]
+    return _TranslationFunc
+
+def NullFunc(field, data):
+    return
+
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
 
@@ -91,28 +106,6 @@
         return function
     return inner_decorator
 
-class CodeFieldInfoContainer(FieldInfoContainer):
-    def __setitem__(self, key, val):
-        self._field_list[key] = val
-
-    def __iter__(self):
-        return itertools.chain(self._field_list.iterkeys(),
-                               self._universal_field_list.iterkeys())
-
-    def keys(self):
-        return set(self._field_list.keys() + self._universal_field_list.keys())
-
-    def has_key(self, key):
-        return key in self._universal_field_list \
-            or key in self._field_list
-
-    def __getitem__(self, key):
-        if key in self._field_list:
-            return self._field_list[key]
-        if key in self._universal_field_list:
-            return self._universal_field_list[key]
-        raise KeyError(key)
-
 class ValidationException(Exception):
     pass
 
@@ -120,7 +113,6 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
-
     def __str__(self):
         return "(%s, %s)" % (self.ghost_zones, self.fields)
 
@@ -131,21 +123,18 @@
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
-
     def __str__(self):
         return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
-
     def __str__(self):
         return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
-
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
@@ -159,14 +148,16 @@
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
-        self.ActiveDimensions = [nd, nd, nd]
+        self.ActiveDimensions = [nd,nd,nd]
         self.LeftEdge = [0.0, 0.0, 0.0]
         self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = na.ones(3, "float64")
         self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
-        if pf is None:  # setup defaults
+
+        if pf is None:
+            # required attrs
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
@@ -174,6 +165,7 @@
             pf.domain_right_edge = na.ones(3, 'float64')
             pf.dimensionality = 3
         self.pf = pf
+
         class fake_hierarchy(object):
             class fake_io(object):
                 def _read_data_set(io_self, data, field):
@@ -194,47 +186,42 @@
             defaultdict.__init__(self, 
                 lambda: na.ones((nd * nd * nd), dtype='float64')
                 + 1e-4*na.random.random((nd * nd * nd)))
+
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(item) and \
-            FI[item]._function.func_name != '<lambda>':
+        if FI.has_key(item) and FI[item]._function.func_name != 'NullFunc':
             try:
                 vv = FI[item](self)
             except NeedsGridType as exc:
                 ngz = exc.ghost_zones
-                nfd = FieldDetector(self.nd+ngz*2)
+                nfd = FieldDetector(self.nd + ngz * 2)
                 nfd._num_ghost_zones = ngz
                 vv = FI[item](nfd)
                 if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
-
                 for i in nfd.requested:
                     if i not in self.requested: self.requested.append(i)
-
                 for i in nfd.requested_parameters:
                     if i not in self.requested_parameters:
                         self.requested_parameters.append(i)
-
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
                 return self[item]
-
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
     def _read_data(self, field_name):
         self.requested.append(field_name)
         FI = getattr(self.pf, "field_info", FieldInfo)
-        if FI.has_key(field_name) and \
-           FI[field_name].particle_type:
+        if FI.has_key(field_name) and FI[field_name].particle_type:
             self.requested.append(field_name)
             return na.ones(self.NumberOfParticles)
         return defaultdict.__missing__(self, field_name)
 
     def get_field_parameter(self, param):
         self.requested_parameters.append(param)
-        if param in ['bulk_velocity','center','height_vector']:
-            return na.random.random(3)*1e-2
+        if param in ['bulk_velocity', 'center', 'height_vector']:
+            return na.random.random(3) * 1e-2
         else:
             return 0.0
     _num_ghost_zones = 0
@@ -258,40 +245,35 @@
         :param function: is a function handle that defines the field
         :param convert_function: must convert to CGS, if it needs to be done
         :param units: is a mathtext-formatted string that describes the field
-        :param projected_units: if we display a projection, what should the units be?
+        :param projected_units: if we display a projection, what should the
+                                units be?
         :param take_log: describes whether the field should be logged
         :param validators: is a list of :class:`FieldValidator` objects
         :param particle_type: is this field based on particles?
         :param vector_field: describes the dimensionality of the field
         :param display_field: governs its appearance in the dropdowns in reason
-        :param not_in_all: is used for baryon fields from the data that are not in
-                           all the grids
+        :param not_in_all: is used for baryon fields from the data that are not
+                           in all the grids
         :param display_name: a name used in the plots
         :param projection_conversion: which unit should we multiply by in a
                                       projection?
-
         """
         self.name = name
         self._function = function
-
         if validators:
             self.validators = ensure_list(validators)
         else:
             self.validators = []
-
         self.take_log = take_log
         self._units = units
         self._projected_units = projected_units
-
         if not convert_function:
             convert_function = lambda a: 1.0
         self._convert_function = convert_function
         self._particle_convert_function = particle_convert_function
-
         self.particle_type = particle_type
         self.vector_field = vector_field
         self.projection_conversion = projection_conversion
-
         self.display_field = display_field
         self.display_name = display_name
         self.not_in_all = not_in_all
@@ -300,7 +282,6 @@
         """
         This raises an exception of the appropriate type if the set of
         validation mechanisms are not met, and otherwise returns True.
-
         """
         for validator in self.validators:
             validator(data)
@@ -310,7 +291,6 @@
     def get_dependencies(self, *args, **kwargs):
         """
         This returns a list of names of fields that this field depends on.
-
         """
         e = FieldDetector(*args, **kwargs)
         if self._function.func_name == '<lambda>':
@@ -320,50 +300,43 @@
         return e
 
     def get_units(self):
-        """ Return a string describing the units.  """
+        """ Return a string describing the units. """
         return self._units
 
     def get_projected_units(self):
         """
         Return a string describing the units if the field has been projected.
-
         """
         return self._projected_units
 
     def __call__(self, data):
-        """ Return the value of the field in a given *data* object.  """
+        """ Return the value of the field in a given *data* object. """
         ii = self.check_available(data)
         original_fields = data.keys() # Copy
         dd = self._function(self, data)
         dd *= self._convert_function(data)
-
         for field_name in data.keys():
             if field_name not in original_fields:
                 del data[field_name]
-
         return dd
 
     def get_source(self):
         """
         Return a string containing the source of the function (if possible.)
-
         """
         return inspect.getsource(self._function)
 
     def get_label(self, projected=False):
         """
         Return a data label for the given field, including units.
-
         """
         name = self.name
         if self.display_name is not None: name = self.display_name
         data_label = r"$\rm{%s}" % name
-
         if projected: units = self.get_projected_units()
         else: units = self.get_units()
         if units != "": data_label += r"\/\/ (%s)" % (units)
         data_label += r"$"
-
         return data_label
 
     def particle_convert(self, data):
@@ -378,11 +351,9 @@
     def __init__(self, parameters):
         """
         This validator ensures that the parameter file has a given parameter.
-
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
-
     def __call__(self, data):
         doesnt_have = []
         for p in self.parameters:
@@ -395,13 +366,11 @@
 class ValidateDataField(FieldValidator):
     def __init__(self, field):
         """
-        This validator ensures that the output file has a given data field
-        stored in it.
-
+        This validator ensures that the output file has a given data field stored
+        in it.
         """
         FieldValidator.__init__(self)
         self.fields = ensure_list(field)
-
     def __call__(self, data):
         doesnt_have = []
         if isinstance(data, FieldDetector): return True
@@ -410,19 +379,15 @@
                 doesnt_have.append(f)
         if len(doesnt_have) > 0:
             raise NeedsDataField(doesnt_have)
-
         return True
 
 class ValidateProperty(FieldValidator):
     def __init__(self, prop):
         """
-        This validator ensures that the data object has a given python
-        attribute.
-
+        This validator ensures that the data object has a given python attribute.
         """
         FieldValidator.__init__(self)
         self.prop = ensure_list(prop)
-
     def __call__(self, data):
         doesnt_have = []
         for p in self.prop:
@@ -430,7 +395,6 @@
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
-
         return True
 
 class ValidateSpatial(FieldValidator):
@@ -438,15 +402,13 @@
         """
         This validator ensures that the data handed to the field is of spatial
         nature -- that is to say, 3-D.
-
         """
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
-
     def __call__(self, data):
-        # When we say spatial information, we really mean that it has a
-        # three-dimensional data structure
+        # When we say spatial information, we really mean
+        # that it has a three-dimensional data structure
         #if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
@@ -459,10 +421,8 @@
         """
         This validator ensures that the data handed to the field is an actual
         grid patch, not a covering grid of any kind.
-
         """
         FieldValidator.__init__(self)
-
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
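
The FieldInfoContainer rewrite replaces the Borg-style shared state
with an ordinary dict plus a fallback chain: lookups that miss locally
are delegated to a fallback container via __missing__. The essence of
the pattern, reduced to a few lines:

    class FallbackDict(dict):
        fallback = None

        @classmethod
        def create_with_fallback(cls, fallback):
            obj = cls()
            obj.fallback = fallback
            return obj

        def __missing__(self, key):
            # Called only when the key is absent locally.
            if self.fallback is None:
                raise KeyError("No field named %s" % key)
            return self.fallback[key]

    universal = FallbackDict(Density="universal Density definition")
    code_specific = FallbackDict.create_with_fallback(universal)
    code_specific["Bx"] = "code-specific Bx definition"
    print code_specific["Bx"]       # found locally
    print code_specific["Density"]  # resolved through the fallback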


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -3,7 +3,7 @@
 
 Author: Matthew Turk <matthewturk at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
+Homepage: http://yt.enzotools.org/
 License:
   Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
 
@@ -25,10 +25,12 @@
 
 import exceptions
 import pdb
-import numpy as na
 import weakref
 
+import numpy as na
+
 from yt.funcs import *
 
 from yt.data_objects.data_containers import YTFieldData
 from yt.utilities.definitions import x_dict, y_dict
@@ -75,20 +77,21 @@
         if self.start_index is not None:
             return self.start_index
         if self.Parent == None:
-            iLE = self.LeftEdge - self.pf.domain_left_edge
-            start_index = iLE / self.dds
+            left = self.LeftEdge - self.pf.domain_left_edge
+            start_index = left / self.dds
             return na.rint(start_index).astype('int64').ravel()
+
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
-                       na.rint((self.LeftEdge - self.Parent.LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+                       na.rint((self.LeftEdge - self.Parent.LeftEdge) / pdx)
+        self.start_index = (start_index * self.pf.refine_by).astype('int64').ravel()
         return self.start_index
 
-
     def get_field_parameter(self, name, default=None):
         """
-        This is typically only used by derived field functions, but
-        it returns parameters used to generate fields.
+        This is typically only used by derived field functions, but it returns
+        parameters used to generate fields.
+
         """
         if self.field_parameters.has_key(name):
             return self.field_parameters[name]
@@ -99,19 +102,19 @@
         """
         Here we set up dictionaries that get passed up and down and ultimately
         to derived fields.
+
         """
         self.field_parameters[name] = val
 
     def has_field_parameter(self, name):
-        """
-        Checks if a field parameter is set.
-        """
+        """ Checks if a field parameter is set. """
         return self.field_parameters.has_key(name)
 
     def convert(self, datatype):
         """
-        This will attempt to convert a given unit to cgs from code units.
-        It either returns the multiplicative factor or throws a KeyError.
+        This will attempt to convert a given unit to cgs from code units. It
+        either returns the multiplicative factor or throws a KeyError.
+
         """
         return self.pf[datatype]
 
@@ -119,7 +122,7 @@
         # We'll do this the slow way to be clear what's going on
         s = "%s (%s): " % (self.__class__.__name__, self.pf)
         s += ", ".join(["%s=%s" % (i, getattr(self,i))
-                       for i in self._con_args])
+                        for i in self._con_args])
         return s
 
     def _generate_field(self, field):
@@ -133,7 +136,7 @@
                 f_gz = ngt_exception.fields
                 gz_grid = self.retrieve_ghost_zones(n_gz, f_gz, smoothed=True)
                 temp_array = self.pf.field_info[field](gz_grid)
-                sl = [slice(n_gz,-n_gz)] * 3
+                sl = [slice(n_gz, -n_gz)] * 3
                 self[field] = temp_array[sl]
             else:
                 self[field] = self.pf.field_info[field](self)
@@ -166,14 +169,14 @@
     def keys(self):
         return self.field_data.keys()
 
-    def get_data(self, field):
+    def get_data(self, field, convert = True):
         """
         Returns a field or set of fields for a key or set of keys
         """
         if not self.field_data.has_key(field):
             if field in self.hierarchy.field_list:
                 conv_factor = 1.0
-                if self.pf.field_info.has_key(field):
+                if self.pf.field_info.has_key(field) and convert == True:
                     conv_factor = self.pf.field_info[field]._convert_function(self)
                 if self.pf.field_info[field].particle_type and \
                    self.NumberOfParticles == 0:
@@ -196,14 +199,14 @@
 
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        # that dx=dy=dz, at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if self.Parent is not None:
             self.dds = self.Parent.dds / self.pf.refine_by
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
-            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+            self.dds = na.array((RE - LE) / self.ActiveDimensions)
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -226,6 +229,7 @@
         Generate a mask that shows which cells overlap with arbitrary
         arrays (*LE* and *RE*) of edges, typically grids, along *axis*.
         Use algorithm described at http://www.gamedev.net/reference/articles/article735.asp
+
         """
         x = x_dict[axis]
         y = y_dict[axis]
@@ -243,8 +247,9 @@
 
     def clear_data(self):
         """
-        Clear out the following things: child_mask, child_indices,
-        all fields, all field parameters.
+        Clear out the following things: child_mask, child_indices, all fields,
+        all field parameters.
+
         """
         self._del_child_mask()
         self._del_child_indices()
@@ -255,9 +260,7 @@
         return self._child_mask, self._child_indices
 
     def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
+        """ Copies all the appropriate attributes from the hierarchy. """
         # This is definitely the slowest part of generating the hierarchy
         # Now we give it pointers to all of its attributes
         # Note that to keep in line with Enzo, we have broken PEP-8
@@ -269,33 +272,27 @@
         h.grid_levels[my_ind, 0] = self.Level
         # This might be needed for streaming formats
         #self.Time = h.gridTimes[my_ind,0]
-        self.NumberOfParticles = h.grid_particle_count[my_ind,0]
+        self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
     def __len__(self):
         return na.prod(self.ActiveDimensions)
 
     def find_max(self, field):
-        """
-        Returns value, index of maximum value of *field* in this gird
-        """
-        coord1d=(self[field]*self.child_mask).argmax()
-        coord=na.unravel_index(coord1d, self[field].shape)
+        """ Returns value, index of maximum value of *field* in this grid. """
+        coord1d = (self[field] * self.child_mask).argmax()
+        coord = na.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def find_min(self, field):
-        """
-        Returns value, index of minimum value of *field* in this gird
-        """
-        coord1d=(self[field]*self.child_mask).argmin()
-        coord=na.unravel_index(coord1d, self[field].shape)
+        """ Returns value, index of minimum value of *field* in this grid. """
+        coord1d = (self[field] * self.child_mask).argmin()
+        coord = na.unravel_index(coord1d, self[field].shape)
         val = self[field][coord]
         return val, coord
 
     def get_position(self, index):
-        """
-        Returns center position of an *index*
-        """
+        """ Returns center position of an *index*. """
         pos = (index + 0.5) * self.dds + self.LeftEdge
         return pos
 
@@ -303,6 +300,7 @@
         """
         Clears all datafields from memory and calls
         :meth:`clear_derived_quantities`.
+
         """
         for key in self.keys():
             del self.field_data[key]
@@ -313,9 +311,7 @@
         self.clear_derived_quantities()
 
     def clear_derived_quantities(self):
-        """
-        Clears coordinates, child_indices, child_mask.
-        """
+        """ Clears coordinates, child_indices, child_mask. """
         # Access the property raw-values here
         del self.child_mask
         del self.child_ind
@@ -368,10 +364,10 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by**(child.Level - self.Level)
+        rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
-        startIndex = na.maximum(0, cgi/rf - gi)
-        endIndex = na.minimum( (cgi+child.ActiveDimensions)/rf - gi,
+        startIndex = na.maximum(0, cgi / rf - gi)
+        endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,
                               self.ActiveDimensions)
         endIndex += (startIndex == endIndex)
         mask[startIndex[0]:endIndex[0],
@@ -381,7 +377,8 @@
     def __generate_child_mask(self):
         """
         Generates self.child_mask, which is zero where child grids exist (and
-        thus, where higher resolution data is available.)
+        thus, where higher resolution data is available).
+
         """
         self._child_mask = na.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
@@ -396,6 +393,7 @@
         """
         Generates self.child_index_mask, which is -1 where there is no child,
         and otherwise has the ID of the grid that resides there.
+
         """
         self._child_index_mask = na.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
@@ -410,10 +408,10 @@
         if self.__coords == None: self._generate_coords()
         return self.__coords
 
-    def _set_coords(self, newC):
+    def _set_coords(self, new_c):
         if self.__coords != None:
             mylog.warning("Overriding coords attribute!  This is probably unwise!")
-        self.__coords = newC
+        self.__coords = new_c
 
     def _del_coords(self):
         del self.__coords
@@ -421,12 +419,12 @@
 
     def _generate_coords(self):
         """
-        Creates self.coords, which is of dimensions (3,ActiveDimensions)
+        Creates self.coords, which is of dimensions (3, ActiveDimensions)
+
         """
-        #print "Generating coords"
         ind = na.indices(self.ActiveDimensions)
-        LE = na.reshape(self.LeftEdge,(3,1,1,1))
-        self['x'], self['y'], self['z'] = (ind+0.5)*self.dds+LE
+        left_shaped = na.reshape(self.LeftEdge, (3, 1, 1, 1))
+        self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
 
     child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
     child_index_mask = property(fget=_get_child_index_mask, fdel=_del_child_index_mask)
@@ -437,9 +435,10 @@
         # We will attempt this by creating a datacube that is exactly bigger
         # than the grid by nZones*dx in each direction
         nl = self.get_global_startindex() - n_zones
-        nr = nl + self.ActiveDimensions + 2*n_zones
+        nr = nl + self.ActiveDimensions + 2 * n_zones
         new_left_edge = nl * self.dds + self.pf.domain_left_edge
         new_right_edge = nr * self.dds + self.pf.domain_left_edge
+
         # Something different needs to be done for the root grid, though
         level = self.Level
         if all_levels:
@@ -452,32 +451,17 @@
         # those of this grid.
         kwargs.update(self.field_parameters)
         if smoothed:
-            #cube = self.hierarchy.smoothed_covering_grid(
-            #    level, new_left_edge, new_right_edge, **kwargs)
             cube = self.hierarchy.smoothed_covering_grid(
                 level, new_left_edge, **kwargs)
         else:
-            cube = self.hierarchy.covering_grid(
-                level, new_left_edge, **kwargs)
+            cube = self.hierarchy.covering_grid(level, new_left_edge, **kwargs)
+
         return cube
 
-    def get_vertex_centered_data(self, field, smoothed=True,
-                                 no_ghost=False):
-        if not no_ghost:
-            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
-            # We have two extra zones in every direction
-            new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
-            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
-            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
-            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
-            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
-            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
-            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
-            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
-            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
-            na.multiply(new_field, 0.125, new_field)
-        else:
-            new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+    def get_vertex_centered_data(self, field, smoothed=True, no_ghost=False):
+        new_field = na.zeros(self.ActiveDimensions + 1, dtype='float64')
+
+        if no_ghost:
             of = self[field]
             new_field[:-1,:-1,:-1] += of
             new_field[:-1,:-1,1:] += of
@@ -493,13 +477,23 @@
 
             new_field[:,:, -1] = 2.0*new_field[:,:,-2] - new_field[:,:,-3]
             new_field[:,:, 0]  = 2.0*new_field[:,:,1] - new_field[:,:,2]
-
             new_field[:,-1, :] = 2.0*new_field[:,-2,:] - new_field[:,-3,:]
             new_field[:,0, :]  = 2.0*new_field[:,1,:] - new_field[:,2,:]
-
             new_field[-1,:,:] = 2.0*new_field[-2,:,:] - new_field[-3,:,:]
             new_field[0,:,:]  = 2.0*new_field[1,:,:] - new_field[2,:,:]
+
             if self.pf.field_info[field].take_log:
                 na.power(10.0, new_field, new_field)
+        else:
+            cg = self.retrieve_ghost_zones(1, field, smoothed=smoothed)
+            na.add(new_field, cg[field][1: ,1: ,1: ], new_field)
+            na.add(new_field, cg[field][:-1,1: ,1: ], new_field)
+            na.add(new_field, cg[field][1: ,:-1,1: ], new_field)
+            na.add(new_field, cg[field][1: ,1: ,:-1], new_field)
+            na.add(new_field, cg[field][:-1,1: ,:-1], new_field)
+            na.add(new_field, cg[field][1: ,:-1,:-1], new_field)
+            na.add(new_field, cg[field][:-1,:-1,1: ], new_field)
+            na.add(new_field, cg[field][:-1,:-1,:-1], new_field)
+            na.multiply(new_field, 0.125, new_field)
+
         return new_field
-
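
The restructured get_vertex_centered_data keeps the same stencil: each
vertex value is the mean of the eight surrounding cell-centered
values, accumulated with shifted slices. A toy reproduction of the
interior averaging (boundary handling, which the method does via ghost
zones or linear extrapolation, is omitted here):

    import numpy as na

    cell = na.arange(27, dtype="float64").reshape((3, 3, 3))
    vert = na.zeros((4, 4, 4), dtype="float64")
    # Accumulate each cell value onto its eight corner vertices.
    for sx in (slice(None, -1), slice(1, None)):
        for sy in (slice(None, -1), slice(1, None)):
            for sz in (slice(None, -1), slice(1, None)):
                vert[sx, sy, sz] += cell
    vert *= 0.125
    # Interior vertices now hold the mean of their eight neighbors.
    print vert[1, 1, 1]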


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/hierarchy.py
--- a/yt/data_objects/hierarchy.py
+++ b/yt/data_objects/hierarchy.py
@@ -35,12 +35,12 @@
 
 from yt.arraytypes import blankRecordArray
 from yt.config import ytcfg
+from yt.data_objects.field_info_container import NullFunc
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_splitter
-from object_finding_mixin import \
-    ObjectFindingMixin
+from object_finding_mixin import ObjectFindingMixin
 
 from .data_containers import data_object_registry
 
@@ -137,6 +137,32 @@
             self.proj = self.overlap_proj
         self.object_types.sort()
 
+    def _setup_unknown_fields(self):
+        known_fields = self.parameter_file._fieldinfo_known
+        for field in self.field_list:
+            # By allowing a backup, we don't mandate that it's found in our
+            # current field info.  This means we'll instead simply override
+            # it.
+            ff = self.parameter_file.field_info.pop(field, None)
+            if field not in known_fields:
+                rootloginfo("Adding unknown field %s to list of fields", field)
+                cf = None
+                if self.parameter_file.has_key(field):
+                    def external_wrapper(f):
+                        def _convert_function(data):
+                            return data.convert(f)
+                        return _convert_function
+                    cf = external_wrapper(field)
+                # Note that we call add_field on the field_info directly.  This
+                # will allow the same field detection mechanism to work for 1D, 2D
+                # and 3D fields.
+                self.pf.field_info.add_field(
+                        field, NullFunc,
+                        convert_function=cf, take_log=False, units=r"Unknown")
+            else:
+                mylog.debug("Adding known field %s to list of fields", field)
+                self.parameter_file.field_info[field] = known_fields[field]
+
     # Now all the object related stuff
 
     def all_data(self, find_max=False):


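The new generic _setup_unknown_fields() above (it replaces per-frontend copies removed later in this commit) wraps the conversion function in external_wrapper instead of closing over the loop variable directly. That sidesteps Python's late-binding closures: without the wrapper, every _convert_function would see the final value of field. A self-contained sketch (FakeData is a stand-in, not a yt class):

    def external_wrapper(f):
        def _convert_function(data):
            return data.convert(f)  # f is fixed per external_wrapper call
        return _convert_function

    class FakeData(object):
        def convert(self, name):
            return "convert(%s)" % name

    converters = [external_wrapper(f) for f in ("Density", "GasEnergy")]
    print converters[0](FakeData())  # convert(Density), not convert(GasEnergy)

The naive form, lambda data: data.convert(field), would have produced convert(GasEnergy) for both entries.
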
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/object_finding_mixin.py
--- a/yt/data_objects/object_finding_mixin.py
+++ b/yt/data_objects/object_finding_mixin.py
@@ -185,20 +185,21 @@
         return self.grids[grid_i], grid_i
 
     def get_periodic_box_grids(self, left_edge, right_edge):
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
         mask = na.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        dw = dr - dl
+        left_dist = left_edge - dl
         db = right_edge - left_edge
         for off_x in [-1, 0, 1]:
             nle = left_edge.copy()
-            nre = left_edge.copy()
-            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
             for off_y in [-1, 0, 1]:
-                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
                 for off_z in [-1, 0, 1]:
-                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
                     nre = nle + db
                     g, gi = self.get_box_grids(nle, nre)
                     mask[gi] = True
@@ -215,20 +216,21 @@
         return self.grids[mask], na.where(mask)
 
     def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
-        left_edge = na.array(left_edge)
-        right_edge = na.array(right_edge)
         mask = na.zeros(self.grids.shape, dtype='bool')
         dl = self.parameter_file.domain_left_edge
         dr = self.parameter_file.domain_right_edge
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        dw = dr - dl
+        left_dist = left_edge - dl
         db = right_edge - left_edge
         for off_x in [-1, 0, 1]:
             nle = left_edge.copy()
-            nre = left_edge.copy()
-            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            nle[0] = (dw[0]*off_x + dl[0]) + left_dist[0]
             for off_y in [-1, 0, 1]:
-                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                nle[1] = (dw[1]*off_y + dl[1]) + left_dist[1]
                 for off_z in [-1, 0, 1]:
-                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nle[2] = (dw[2]*off_z + dl[2]) + left_dist[2]
                     nre = nle + db
                     g, gi = self.get_box_grids_below_level(nle, nre, level)
                     mask[gi] = True


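The get_periodic_box_grids* rewrite precomputes the domain width dw = dr - dl and the box's offset inside the domain, left_dist = left_edge - dl. This is a behavior fix as well as a cleanup: (dw*off + dl) + left_dist reduces to left_edge + dw*off, whereas the old dl + dw*off + left_edge counted dl twice, which only matched the intent for domains whose left edge is zero. A quick check with arbitrary (hypothetical) edge values:

    import numpy as na

    dl = na.array([-0.5, -0.5, -0.5])
    dr = na.array([ 0.5,  0.5,  0.5])
    dw = dr - dl
    left_edge = na.array([0.1, 0.2, 0.3])
    left_dist = left_edge - dl
    for off in (-1, 0, 1):
        new = (dw[0]*off + dl[0]) + left_dist[0]
        old = dl[0] + dw[0]*off + left_edge[0]
        assert na.allclose(new, left_edge[0] + dw[0]*off)
        assert na.allclose(old - new, dl[0])  # the old form overshot by dl
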
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -35,6 +35,8 @@
     ParameterFileStore, \
     NoParameterShelf, \
     output_type_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 # We want to support the movie format in the future.
 # When such a thing comes to pass, I'll move all the stuff that is constant up
@@ -96,6 +98,8 @@
                 pass
         self.print_key_parameters()
 
+        self.create_field_info()
+
     def __reduce__(self):
         args = (self._hash(),)
         return (_reconstruct_pf, args)
@@ -189,6 +193,17 @@
                 v = getattr(self, a)
                 mylog.info("Parameters: %-25s = %s", a, v)
 
+    def create_field_info(self):
+        if getattr(self, "field_info", None) is None:
+            # The setting up of fields occurs in the hierarchy, which is only
+            # instantiated once.  So we have to double check to make sure that,
+            # in the event of double-loads of a parameter file, we do not blow
+            # away the existing field_info.
+            self.field_info = FieldInfoContainer.create_with_fallback(
+                                self._fieldinfo_fallback)
+
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)


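create_field_info() guards with getattr(self, "field_info", None) is None so that a double-load of the same parameter file cannot blow away a field_info the hierarchy has already populated. A minimal sketch of the idiom, using a stand-in container class (the real FieldInfoContainer lives in yt.data_objects.field_info_container):

    class Container(dict):
        @classmethod
        def create_with_fallback(cls, fallback):
            c = cls()
            c.fallback = fallback
            return c

    class PF(object):
        _fieldinfo_fallback = Container()
        def create_field_info(self):
            if getattr(self, "field_info", None) is None:
                self.field_info = Container.create_with_fallback(
                    self._fieldinfo_fallback)

    pf = PF()
    pf.create_field_info()
    fi = pf.field_info
    pf.create_field_info()      # second call is a no-op
    assert pf.field_info is fi  # the existing field_info survives
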
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -112,7 +112,8 @@
         outputs = []
         for line in open(output_log):
             if not line.startswith(line_prefix): continue
-            fn = line[len(line_prefix):].strip()
+            cut_line = line[len(line_prefix):].strip()
+            fn = cut_line.split()[0]
             outputs.append(load(fn))
         obj = cls(outputs)
         return obj


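The time_series change is a one-token fix: previously everything after the prefix became the file name, so trailing columns in the output log (times, cycle numbers, and so on) ended up inside fn. split()[0] keeps only the first whitespace-delimited token. Sketch, with a hypothetical log line and prefix:

    line_prefix = "DATASET WRITTEN "
    line = "DATASET WRITTEN DD0042/DD0042 2.5e-01\n"
    cut_line = line[len(line_prefix):].strip()
    fn = cut_line.split()[0]
    print fn  # DD0042/DD0042, without the trailing "2.5e-01"
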
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -30,11 +30,9 @@
 import inspect
 import copy
 
-from math import pi
-
 from yt.funcs import *
 
-from yt.utilities.amr_utils import CICDeposit_3
+from yt.utilities.amr_utils import CICDeposit_3, obtain_rvec
 from yt.utilities.cosmology import Cosmology
 from field_info_container import \
     add_field, \
@@ -139,88 +137,6 @@
 add_field("SoundSpeed", function=_SoundSpeed,
           units=r"\rm{cm}/\rm{s}")
 
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return na.array([], dtype=dtype)
-        try:
-            return io._read_data_set(data, p_field).astype(dtype)
-        except io._read_exception:
-            pass
-        # This is bad.  But it's the best idea I have right now.
-        return data._read_data(p_field.replace("_"," ")).astype(dtype)
-    return _Particles
-for pf in ["type", "mass"] + \
-          ["position_%s" % ax for ax in 'xyz']:
-    pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _convRetainInt(data):
-    return 1
-add_field("particle_index", function=particle_func("particle_index", "int64"),
-          validators = [ValidateSpatial(0)], particle_type=True,
-          convert_function=_convRetainInt)
-
-def _get_vel_convert(ax):
-    def _convert_p_vel(data):
-        return data.convert("%s-velocity" % ax)
-    return _convert_p_vel
-for ax in 'xyz':
-    pf = "particle_velocity_%s" % ax
-    pfunc = particle_func(pf)
-    cfunc = _get_vel_convert(ax)
-    add_field(pf, function=pfunc, convert_function=cfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
-    pfunc = particle_func(pf)
-    add_field(pf, function=pfunc,
-              validators = [ValidateSpatial(0),
-                            ValidateDataField(pf)],
-              particle_type=True)
-add_field("particle_mass", function=particle_func("particle_mass"),
-          validators=[ValidateSpatial(0)], particle_type=True)
-
-def _ParticleAge(field, data):
-    current_time = data.pf.current_time
-    return (current_time - data["creation_time"])
-def _convertParticleAge(data):
-    return data.convert("years")
-add_field("ParticleAge", function=_ParticleAge,
-          validators=[ValidateDataField("creation_time")],
-          particle_type=True, convert_function=_convertParticleAge)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64') * \
-                just_one(data["CellVolumeCode"].ravel())
-    # Note that we mandate grid-type here, so this is okay
-    return particles
-
-def _convertParticleMass(data):
-    return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
-    return cf
-def _convertParticleMassMsun(data):
-    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
-    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
-    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
-    return cf
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMass,
-          particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_IOLevelParticleMassMsun)
-
 def _RadialMachNumber(field, data):
     """M{|v|/t_sound}"""
     return na.abs(data["RadialVelocity"]) / data["SoundSpeed"]
@@ -348,7 +264,7 @@
     M{sqrt(3pi/(16*G*rho))} or M{sqrt(3pi/(16G))*rho^-(1/2)}
     Note that we return in our natural units already
     """
-    return (3.0*pi/(16*G*data["Density"]))**(1./2.)
+    return (3.0*na.pi/(16*G*data["Density"]))**(1./2.)
 add_field("DynamicalTime", function=_DynamicalTime,
            units=r"\rm{s}")
 
@@ -377,6 +293,7 @@
 
 def _TotalMass(field,data):
     return (data["Density"]+data["Dark_Matter_Density"]) * data["CellVolume"]
+add_field("TotalMass", function=_TotalMass, units=r"\rm{g}")
 add_field("TotalMassMsun", units=r"M_{\odot}",
           function=_TotalMass,
           convert_function=_convertCellMassMsun)
@@ -510,7 +427,7 @@
     return new_field2
 add_field("AveragedDensity",
           function=_AveragedDensity,
-          validators=[ValidateSpatial(1)])
+          validators=[ValidateSpatial(1, ["Density"])])
 
 def _DivV(field, data):
     # We need to set up stencils
@@ -566,13 +483,6 @@
     zv = data["z-velocity"] - bv[2]
     return xv, yv, zv
 
-def obtain_rvec(data):
-    center = data.get_field_parameter('center')
-    coords = na.array([data['x'],data['y'],data['z']], dtype='float64')
-    new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-    r_vec = coords - na.reshape(center,new_shape)
-    return r_vec # axis 0 is the x,y,z
-
 def _SpecificAngularMomentum(field, data):
     """
     Calculate the angular velocity.  Returns a vector for each cell.


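The pure-Python obtain_rvec removed above now comes from yt.utilities.amr_utils (note the changed import at the top of this diff), i.e. it has moved into Cython. For reference, what it computes is the displacement of every input point from the 'center' field parameter, with the x/y/z components along axis 0; a numpy equivalent of the removed version:

    import numpy as na

    def obtain_rvec_py(x, y, z, center):
        # axis 0 of the result is the (x, y, z) component axis
        coords = na.array([x, y, z], dtype='float64')
        new_shape = tuple([3] + [1] * (len(coords.shape) - 1))
        return coords - na.reshape(center, new_shape)
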
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/art/api.py
--- a/yt/frontends/art/api.py
+++ b/yt/frontends/art/api.py
@@ -34,7 +34,6 @@
       ARTStaticOutput
 
 from .fields import \
-      ARTFieldContainer, \
       ARTFieldInfo, \
       add_art_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -37,8 +37,10 @@
       AMRHierarchy
 from yt.data_objects.static_output import \
       StaticOutput
-from .fields import ARTFieldContainer
-from .fields import add_field
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    ARTFieldInfo, add_art_field, KnownARTFields
 from yt.utilities.definitions import \
     mpc_conversion
 from yt.utilities.io_handler import \
@@ -113,7 +115,6 @@
     
     def __init__(self, pf, data_style='art'):
         self.data_style = data_style
-        self.field_info = ARTFieldContainer()
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
@@ -346,20 +347,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -372,7 +359,8 @@
 
 class ARTStaticOutput(StaticOutput):
     _hierarchy_class = ARTHierarchy
-    _fieldinfo_class = ARTFieldContainer
+    _fieldinfo_fallback = ARTFieldInfo
+    _fieldinfo_known = KnownARTFields
     _handle = None
     
     def __init__(self, filename, data_style='art',
@@ -382,7 +370,6 @@
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
         
-        self.field_info = self._fieldinfo_class()
         self.dimensionality = 3
         self.refine_by = 2
         self.parameters["HydroMethod"] = 'art'


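The ART changes above illustrate the new frontend convention: rather than each hierarchy instantiating a CodeFieldInfoContainer, a frontend declares two module-level containers and points its StaticOutput subclass at them through class attributes. Schematically (MyFrontend is hypothetical; the container API is the one used elsewhere in this commit):

    from yt.data_objects.field_info_container import \
        FieldInfoContainer, FieldInfo
    from yt.data_objects.static_output import StaticOutput

    KnownMyFields = FieldInfoContainer()  # fields as they appear on disk
    MyFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)

    class MyFrontendStaticOutput(StaticOutput):
        _fieldinfo_fallback = MyFieldInfo   # used by create_field_info()
        _fieldinfo_known = KnownMyFields    # used by _setup_unknown_fields()
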
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -24,7 +24,10 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    TranslationFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -34,15 +37,11 @@
 from yt.utilities.physical_constants import \
     boltzmann_constant_cgs, mass_hydrogen_cgs
 
-import pdb
+ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = ARTFieldInfo.add_field
 
-class ARTFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ARTFieldInfo = ARTFieldContainer()
-add_art_field = ARTFieldInfo.add_field
-
-add_field = add_art_field
+KnownARTFields = FieldInfoContainer()
+add_art_field = KnownARTFields.add_field
 
 translation_dict = {"Density":"density",
                     "TotalEnergy":"TotalEnergy",
@@ -54,33 +53,28 @@
                     "GasEnergy":"GasEnergy"
                    }
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
-
 for f,v in translation_dict.items():
-    if v not in ARTFieldInfo:
-        add_field(v, function=lambda a,b: None, take_log=False,
+    add_art_field(v, function=NullFunc, take_log=False,
                   validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+    add_art_field(f, function=TranslationFunc(v), take_log=True)
 
 #def _convertMetallicity(data):
 #    return data.convert("Metal_Density1")
-#ARTFieldInfo["Metal_Density1"]._units = r"1"
-#ARTFieldInfo["Metal_Density1"]._projected_units = r"1"
-#ARTFieldInfo["Metal_Density1"]._convert_function=_convertMetallicity
+#KnownARTFields["Metal_Density1"]._units = r"1"
+#KnownARTFields["Metal_Density1"]._projected_units = r"1"
+#KnownARTFields["Metal_Density1"]._convert_function=_convertMetallicity
 
 
 def _convertDensity(data):
     return data.convert("Density")
-ARTFieldInfo["Density"]._units = r"\rm{g}/\rm{cm}^3"
-ARTFieldInfo["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-ARTFieldInfo["Density"]._convert_function=_convertDensity
+KnownARTFields["Density"]._units = r"\rm{g}/\rm{cm}^3"
+KnownARTFields["Density"]._projected_units = r"\rm{g}/\rm{cm}^2"
+KnownARTFields["Density"]._convert_function=_convertDensity
 
 def _convertEnergy(data):
     return data.convert("GasEnergy")
-ARTFieldInfo["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
-ARTFieldInfo["GasEnergy"]._convert_function=_convertEnergy
+KnownARTFields["GasEnergy"]._units = r"\rm{ergs}/\rm{g}"
+KnownARTFields["GasEnergy"]._convert_function=_convertEnergy
 
 def _Temperature(field, data):
     tr  = data["GasEnergy"] / data["Density"]
@@ -89,9 +83,9 @@
     return tr
 def _convertTemperature(data):
     return data.convert("Temperature")
-add_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
-ARTFieldInfo["Temperature"]._units = r"\mathrm{K}"
-ARTFieldInfo["Temperature"]._convert_function=_convertTemperature
+add_art_field("Temperature", function=_Temperature, units = r"\mathrm{K}")
+KnownARTFields["Temperature"]._units = r"\mathrm{K}"
+KnownARTFields["Temperature"]._convert_function=_convertTemperature
 
 def _MetallicitySNII(field, data):
     #get the dimensionless mass fraction
@@ -99,8 +93,8 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
-ARTFieldInfo["MetallicitySNII"]._units = r"\mathrm{K}"
+add_art_field("MetallicitySNII", function=_MetallicitySNII, units = r"\mathrm{K}")
+KnownARTFields["MetallicitySNII"]._units = r"\mathrm{K}"
 
 def _MetallicitySNIa(field, data):
     #get the dimensionless mass fraction
@@ -108,8 +102,8 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
-ARTFieldInfo["MetallicitySNIa"]._units = r"\mathrm{K}"
+add_art_field("MetallicitySNIa", function=_MetallicitySNIa, units = r"\mathrm{K}")
+KnownARTFields["MetallicitySNIa"]._units = r"\mathrm{K}"
 
 def _Metallicity(field, data):
     #get the dimensionless mass fraction of the total metals
@@ -118,14 +112,14 @@
     tr *= data.pf.conversion_factors["Density"]    
     return tr
     
-add_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
-ARTFieldInfo["Metallicity"]._units = r"\mathrm{K}"
+add_art_field("Metallicity", function=_Metallicity, units = r"\mathrm{K}")
+KnownARTFields["Metallicity"]._units = r"\mathrm{K}"
 
 def _Metal_Density(field,data):
     return data["Metal_DensitySNII"]+data["Metal_DensitySNIa"]
 def _convert_Metal_Density(data):
     return data.convert("Metal_Density")
 
-add_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
-ARTFieldInfo["Metal_Density"]._units = r"\mathrm{K}"
-ARTFieldInfo["Metal_Density"]._convert_function=_convert_Metal_Density
+add_art_field("Metal_Density", function=_Metal_Density, units = r"\mathrm{K}")
+KnownARTFields["Metal_Density"]._units = r"\mathrm{K}"
+KnownARTFields["Metal_Density"]._convert_function=_convert_Metal_Density


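In the translation loop above, each on-disk name is registered with NullFunc (a placeholder meaning "read from disk, do not derive") and each yt-standard name with TranslationFunc(v). TranslationFunc is imported from field_info_container; in effect it is a closure factory. A rough stand-in, not the real implementation:

    def translation_func(field_name):
        def _translate(field, data):
            return data[field_name]
        return _translate

    # So, for the ("Density", "density") pair in translation_dict:
    #   add_art_field("density", function=NullFunc, ...)       # on-disk name
    #   add_art_field("Density",                               # yt alias
    #                 function=translation_func("density"), take_log=True)

Like the old _generate_translation helper, the factory form pins the field name per call instead of closing over the loop variable.
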
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ b/yt/frontends/castro/api.py
@@ -34,7 +34,6 @@
       CastroStaticOutput
 
 from .fields import \
-      CastroFieldContainer, \
       CastroFieldInfo, \
       add_castro_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -27,27 +27,19 @@
 import os
 import weakref
 import itertools
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
 import numpy as na
 
-from collections import \
-    defaultdict
-from string import \
-    strip, \
-    rstrip
-from stat import \
-    ST_CTIME
-
 from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.data_objects.hierarchy import \
-           AMRHierarchy
-from yt.data_objects.static_output import \
-           StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion
-from yt.utilities.amr_utils import \
-    get_box_grids_level
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.data_objects.hierarchy import AMRHierarchy
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import mpc_conversion
+from yt.utilities.amr_utils import get_box_grids_level
 
 from .definitions import \
     castro2enzoDict, \
@@ -56,39 +48,40 @@
     castro_FAB_header_pattern, \
     castro_particle_field_names, \
     boxlib_bool_to_int
-
 from .fields import \
-    CastroFieldContainer, \
-    add_field
+    CastroFieldInfo, \
+    KnownCastroFields, \
+    add_castro_field
 
 
 class CastroGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions, start, stop, paranoia=False,**kwargs):
-        AMRGridPatch.__init__(self, index,**kwargs)
+
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
+                 dimensions, start, stop, paranoia=False, **kwargs):
+        super(CastroGrid, self).__init__(index, **kwargs)
         self.filename = filename
         self._offset = offset
-        self._paranoid = paranoia
+        self._paranoid = paranoia  # TODO: Factor this behavior out in tests
 
-        # should error check this
+        ### TODO: error check this (test)
         self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
         self.start_index = start.copy()#.transpose()
         self.stop_index = stop.copy()#.transpose()
         self.LeftEdge  = LeftEdge.copy()
         self.RightEdge = RightEdge.copy()
         self.index = index
         self.Level = level
 
     def get_global_startindex(self):
         return self.start_index
 
     def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
-        # This is definitely the slowest part of generating the hierarchy
+        """ Copies all the appropriate attributes from the hierarchy. """
+        # This is definitely the slowest part of generating the hierarchy.
         # Now we give it pointers to all of its attributes
         # Note that to keep in line with Enzo, we have broken PEP-8
+
         h = self.hierarchy # cache it
         #self.StartIndices = h.gridStartIndices[self.id]
         #self.EndIndices = h.gridEndIndices[self.id]
@@ -100,6 +93,7 @@
         self.field_indexes = h.field_indexes
         self.Children = h.gridTree[self.id]
         pIDs = h.gridReverseTree[self.id]
+
         if len(pIDs) > 0:
             self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
         else:
@@ -115,6 +109,7 @@
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
             self.dds = na.array((RE-LE)/self.ActiveDimensions)
+
         if self.pf.dimensionality < 2: self.dds[1] = 1.0
         if self.pf.dimensionality < 3: self.dds[2] = 1.0
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@@ -124,86 +119,90 @@
 
 class CastroHierarchy(AMRHierarchy):
     grid = CastroGrid
+
     def __init__(self, pf, data_style='castro_native'):
-        self.field_info = CastroFieldContainer()
+        super(CastroHierarchy, self).__init__(pf, data_style)
+
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir, 'Header')
         self.directory = pf.fullpath
         self.data_style = data_style
-        #self._setup_classes()
 
         # This also sets up the grid objects
-        self.read_global_header(header_filename, self.parameter_file.paranoid_read)
+        self.read_global_header(header_filename,
+                                self.parameter_file.paranoid_read)
         self.read_particle_header()
-        self.__cache_endianness(self.levels[-1].grids[-1])
-        AMRHierarchy.__init__(self, pf, self.data_style)
+        self._cache_endianness(self.levels[-1].grids[-1])
         self._setup_data_io()
         self._setup_field_list()
         self._populate_hierarchy()
 
     def read_global_header(self, filename, paranoid_read):
-        """
-        read the global header file for an Castro plotfile output.
-        """
+        """ Read the global header file for an Castro plotfile output. """
         counter = 0
-        header_file = open(filename,'r')
-        self.__global_header_lines = header_file.readlines()
+        header_file = open(filename, 'r')
+        self._global_header_lines = header_file.readlines()
 
         # parse the file
-        self.castro_version = self.__global_header_lines[0].rstrip()
-        self.n_fields      = int(self.__global_header_lines[1])
+        self.castro_version = self._global_header_lines[0].rstrip()
+        self.n_fields = int(self._global_header_lines[1])
 
-        counter = self.n_fields+2
+        counter = self.n_fields + 2
         self.field_list = []
-        for i, line in enumerate(self.__global_header_lines[2:counter]):
+        for i, line in enumerate(self._global_header_lines[2:counter]):
             self.field_list.append(line.rstrip())
 
         # this is unused...eliminate it?
         #for f in self.field_indexes:
         #    self.field_list.append(castro2ytFieldsDict.get(f, f))
 
-        self.dimension = int(self.__global_header_lines[counter])
+        self.dimension = int(self._global_header_lines[counter])
         if self.dimension != 3:
             raise RunTimeError("Castro must be in 3D to use yt.")
+
         counter += 1
-        self.Time = float(self.__global_header_lines[counter])
+        self.Time = float(self._global_header_lines[counter])
         counter += 1
-        self.finest_grid_level = int(self.__global_header_lines[counter])
+        self.finest_grid_level = int(self._global_header_lines[counter])
         self.n_levels = self.finest_grid_level + 1
         counter += 1
+
         # quantities with _unnecessary are also stored in the inputs
         # file and are not needed.  they are read in and stored in
         # case in the future we want to enable a "backwards" way of
         # taking the data out of the Header file and using it to fill
         # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainLeftEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.domainRightEdge_unnecessary = na.array(map(float, self.__global_header_lines[counter].split()))
+        self.domainRightEdge_unnecessary = na.array(map(float, self._global_header_lines[counter].split()))
         counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #na.array(map(int, self.__global_header_lines[counter].split()))
+        self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
+        #na.array(map(int, self._global_header_lines[counter].split()))
         counter += 1
-        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
-        #domain_re.search(self.__global_header_lines[counter]).groups()
+        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
+        #domain_re.search(self._global_header_lines[counter]).groups()
         counter += 1
-        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
         counter += 1
-        self.dx = na.zeros((self.n_levels,3))
+
+        self.dx = na.zeros((self.n_levels, 3))
-        for i, line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
             self.dx[i] = na.array(map(float, line.split()))
         counter += self.n_levels
-        self.geometry = int(self.__global_header_lines[counter])
+        self.geometry = int(self._global_header_lines[counter])
         if self.geometry != 0:
             raise RunTimeError("yt only supports cartesian coordinates.")
         counter += 1
 
         # this is just to debug. eventually it should go away.
-        linebreak = int(self.__global_header_lines[counter])
+        linebreak = int(self._global_header_lines[counter])
         if linebreak != 0:
-            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+            raise RunTimeError("INTERNAL ERROR! Header is unexpected size")
         counter += 1
 
-        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        # Each level is one group with ngrids on it.  Each grid has 3 lines of 2 reals.
+        # BoxLib madness
         self.levels = []
         grid_counter = 0
         file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
@@ -214,45 +213,50 @@
         data_files_finder = re.compile(data_files_pattern)
 
         for level in range(0, self.n_levels):
-            tmp = self.__global_header_lines[counter].split()
-            # should this be grid_time or level_time??
+            tmp = self._global_header_lines[counter].split()
+            # Should this be grid_time or level_time??
             lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
             counter += 1
-            nsteps = int(self.__global_header_lines[counter])
+            nsteps = int(self._global_header_lines[counter])
             counter += 1
             self.levels.append(CastroLevel(lev, ngrids))
-            # open level header, extract file names and offsets for
-            # each grid
-            # read slightly out of order here: at the end of the lo, hi
-            # pairs for x, y, z is a *list* of files types in the Level
-            # directory. each type has Header and a number of data
-            # files (one per processor)
+            # Open level header, extract file names and offsets for each grid.
+            # Read slightly out of order here: at the end of the lo, hi pairs
+            # for x, y, z is a *list* of file types in the Level directory.
+            # Each type has Header and a number of data files
+            # (one per processor)
             tmp_offset = counter + 3*ngrids
             nfiles = 0
             key_off = 0
             files =   {} # dict(map(lambda a: (a,[]), self.field_list))
             offsets = {} # dict(map(lambda a: (a,[]), self.field_list))
-            while nfiles+tmp_offset < len(self.__global_header_lines) and data_files_finder.match(self.__global_header_lines[nfiles+tmp_offset]):
-                filen = os.path.join(self.parameter_file.fullplotdir, \
-                                     self.__global_header_lines[nfiles+tmp_offset].strip())
+
+            while (nfiles + tmp_offset < len(self._global_header_lines) and
+                   data_files_finder.match(self._global_header_lines[nfiles+tmp_offset])):
+                filen = os.path.join(self.parameter_file.fullplotdir,
+                                     self._global_header_lines[nfiles+tmp_offset].strip())
                 # open each "_H" header file, and get the number of
                 # components within it
                 level_header_file = open(filen+'_H','r').read()
                 start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
                 grid_file_offset = re_file_finder.findall(level_header_file)
                 ncomp_this_file = int(level_header_file.split('\n')[2])
+
                 for i in range(ncomp_this_file):
                     key = self.field_list[i+key_off]
                     f, o = zip(*grid_file_offset)
                     files[key] = f
                     offsets[key] = o
                     self.field_indexes[key] = i
+
                 key_off += ncomp_this_file
                 nfiles += 1
+
             # convert dict of lists to list of dicts
             fn = []
             off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,'Level_%i'%level)
+            lead_path = os.path.join(self.parameter_file.fullplotdir,
+                                     'Level_%i' % level)
             for i in range(ngrids):
                 fi = [os.path.join(lead_path, files[key][i]) for key in self.field_list]
                 of = [int(offsets[key][i]) for key in self.field_list]
@@ -262,21 +266,25 @@
             for grid in range(0, ngrids):
                 gfn = fn[grid]  # filename of file containing this grid
                 gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
-                ylo, yhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
-                zlo, zhi = map(float, self.__global_header_lines[counter].split())
-                counter+=1
+                xlo, xhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
+                ylo, yhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
+                zlo, zhi = map(float, self._global_header_lines[counter].split())
+                counter += 1
                 lo = na.array([xlo, ylo, zlo])
                 hi = na.array([xhi, yhi, zhi])
-                dims, start, stop = self.__calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter, level, gfn, gfo, dims, start, stop, paranoia=paranoid_read, hierarchy=self))
-                grid_counter += 1 # this is global, and shouldn't be reset
-                                  # for each level
+                dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
+                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
+                                                       level, gfn, gfo, dims,
+                                                       start, stop,
+                                                       paranoia=paranoid_read,  ### TODO: rename; 'paranoia' vs. 'paranoid_read' is confusing
+                                                       hierarchy=self))
+                grid_counter += 1   # this is global, and shouldn't be reset
+                                    # for each level
 
             # already read the filenames above...
-            counter+=nfiles
+            counter += nfiles
             self.num_grids = grid_counter
             self.float_type = 'float64'
 
@@ -289,53 +297,55 @@
         if not self.parameter_file.use_particles:
             self.pgrid_info = na.zeros((self.num_grids, 3), dtype='int64')
             return
+
         self.field_list += castro_particle_field_names[:]
-        header = open(os.path.join(self.parameter_file.fullplotdir,
-                        "DM", "Header"))
+        header = open(os.path.join(self.parameter_file.fullplotdir, "DM",
+                                   "Header"))
         version = header.readline()
         ndim = header.readline()
         nfields = header.readline()
         ntotalpart = int(header.readline())
         dummy = header.readline() # nextid
         maxlevel = int(header.readline()) # max level
+
         # Skip over how many grids on each level; this is degenerate
         for i in range(maxlevel+1): dummy = header.readline()
         grid_info = na.fromiter((int(i)
-                    for line in header.readlines()
-                    for i in line.split()
-                    ),
-            dtype='int64', count=3*self.num_grids).reshape((self.num_grids, 3))
+                                 for line in header.readlines()
+                                 for i in line.split()),
+                                dtype='int64',
+                                count=3*self.num_grids).reshape((self.num_grids, 3))
         self.pgrid_info = grid_info
 
-    def __cache_endianness(self, test_grid):
+    def _cache_endianness(self, test_grid):
         """
-        Cache the endianness and bytes perreal of the grids by using a
-        test grid and assuming that all grids have the same
-        endianness. This is a pretty safe assumption since Castro uses
-        one file per processor, and if you're running on a cluster
-        with different endian processors, then you're on your own!
+        Cache the endianness and bytes per real of the grids by using a test grid
+        and assuming that all grids have the same endianness. This is a pretty
+        safe assumption since Castro uses one file per processor, and if you're
+        running on a cluster with different endian processors, then you're on
+        your own!
+
         """
-        # open the test file & grab the header
-        inFile = open(os.path.expanduser(test_grid.filename[self.field_list[0]]),'rb')
-        header = inFile.readline()
-        inFile.close()
+        # open the test file and grab the header
+        in_file = open(os.path.expanduser(test_grid.filename[self.field_list[0]]), 'rb')
+        header = in_file.readline()
+        in_file.close()
         header.strip()
-
-        # parse it. the patter is in CastroDefs.py
-        headerRe = re.compile(castro_FAB_header_pattern)
-        bytesPerReal, endian, start, stop, centerType, nComponents = headerRe.search(header).groups()
-        self._bytesPerReal = int(bytesPerReal)
-        if self._bytesPerReal == int(endian[0]):
+        # Parse it. The pattern is in castro.definitions.py
+        header_re = re.compile(castro_FAB_header_pattern)
+        bytes_per_real, endian, start, stop, centerType, n_components = header_re.search(header).groups()
+        self._bytes_per_real = int(bytes_per_real)
+        if self._bytes_per_real == int(endian[0]):
             dtype = '<'
-        elif self._bytesPerReal == int(endian[-1]):
+        elif self._bytes_per_real == int(endian[-1]):
             dtype = '>'
         else:
             raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
 
-        dtype += ('f%i' % self._bytesPerReal) # always a floating point
+        dtype += ('f%i' % self._bytes_per_real) # always a floating point
         self._dtype = dtype
 
-    def __calculate_grid_dimensions(self, start_stop):
+    def _calculate_grid_dimensions(self, start_stop):
         start = na.array(map(int, start_stop[0].split(',')))
         stop = na.array(map(int, start_stop[1].split(',')))
         dimension = stop - start + 1
@@ -343,21 +353,28 @@
 
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
+
         self.grids = na.concatenate([level.grids for level in self.levels])
         basedir = self.parameter_file.fullplotdir
+
         for g, pg in itertools.izip(self.grids, self.pgrid_info):
             g.particle_filename = os.path.join(
                 basedir, "DM", "Level_%s" % (g.Level), "DATA_%04i" % pg[0])
             g.NumberOfParticles = pg[1]
             g._particle_offset = pg[2]
+
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
-        gls = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+
+        gls = na.concatenate([level.ngrids * [level.level] for level in self.levels])
         self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels], axis=0)
+        grid_dcs = na.concatenate([level.ngrids * [self.dx[level.level]]
+                                  for level in self.levels], axis=0)
+
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
         self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
+
         left_edges = []
         right_edges = []
         dims = []
@@ -365,23 +382,28 @@
             left_edges += [g.LeftEdge for g in level.grids]
             right_edges += [g.RightEdge for g in level.grids]
             dims += [g.ActiveDimensions for g in level.grids]
+
         self.grid_left_edge = na.array(left_edges)
         self.grid_right_edge = na.array(right_edges)
         self.grid_dimensions = na.array(dims)
         self.gridReverseTree = [] * self.num_grids
         self.gridReverseTree = [ [] for i in range(self.num_grids)]
         self.gridTree = [ [] for i in range(self.num_grids)]
+
         mylog.debug("Done creating grid objects")
 
     def _populate_hierarchy(self):
-        self.__setup_grid_tree()
+        self._setup_grid_tree()
         #self._setup_grid_corners()
+
         for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            if (i % 1e4) == 0:
+                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+
             grid._prepare_grid()
             grid._setup_dx()
 
-    def __setup_grid_tree(self):
+    def _setup_grid_tree(self):
         mask = na.empty(self.grids.size, dtype='int32')
         for i, grid in enumerate(self.grids):
             get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
@@ -409,16 +431,20 @@
 
     def _setup_field_list(self):
         self.derived_field_list = []
+
         for field in self.field_info:
             try:
-                fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
+                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
             except:
                 continue
+
             available = na.all([f in self.field_list for f in fd.requested])
             if available: self.derived_field_list.append(field)
+
         for field in self.field_list:
             if field not in self.derived_field_list:
                 self.derived_field_list.append(field)
+
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -428,16 +454,18 @@
                         return data.convert(f)
                     return _convert_function
                 cf = external_wrapper(field)
                 # Note that we call add_field on the field_info directly.  This
                 # will allow the same field detection mechanism to work for 1D, 2D
                 # and 3D fields.
                 self.pf.field_info.add_field(
                         field, lambda a, b: None,
                         convert_function=cf, take_log=False,
                         particle_type=True)
 
+    ### TODO: check if this can be removed completely
     def _count_grids(self):
-        """this is already provided in
+        """
+        this is already provided in ???
 
         """
         pass
@@ -456,21 +484,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -489,19 +502,21 @@
         self.ngrids = ngrids
         self.grids = []
 
-
 class CastroStaticOutput(StaticOutput):
     """
-    This class is a stripped down class that simply reads and parses
-    *filename*, without looking at the Castro hierarchy.
+    This class is a stripped down class that simply reads and parses *filename*,
+    without looking at the Castro hierarchy.
+
     """
     _hierarchy_class = CastroHierarchy
-    _fieldinfo_class = CastroFieldContainer
+    _fieldinfo_fallback = CastroFieldInfo
+    _fieldinfo_known = KnownCastroFields
 
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='castro_native', paranoia=False,
                  storage_filename = None):
-        """need to override for Castro file structure.
+        """
+        Need to override for Castro file structure.
 
         the paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
@@ -512,6 +527,8 @@
          * ASCII (not implemented in yt)
 
         """
+        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
+                                                 data_style=data_style)
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia
         self.parameter_filename = paramFilename
@@ -520,13 +537,10 @@
 
         self.fparameters = {}
 
-        StaticOutput.__init__(self, plotname.rstrip("/"),
-                              data_style='castro_native')
-        self.field_info = self._fieldinfo_class()
-
         # These should maybe not be hardcoded?
+        ### TODO: this.
         self.parameters["HydroMethod"] = 'castro' # always PPM DE
-        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["Time"] = 1.0 # default unit is 1...
         self.parameters["DualEnergyFormalism"] = 0 # always off.
         self.parameters["EOSType"] = -1 # default
 
@@ -543,13 +557,17 @@
         # fill our args
         pname = args[0].rstrip("/")
         dn = os.path.dirname(pname)
-        if len(args) > 1: kwargs['paramFilename'] = args[1]
+        if len(args) > 1:
+            kwargs['paramFilename'] = args[1]
+
         pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
 
         # We check for the job_info file's existence because this is currently
         # what distinguishes Castro data from MAESTRO data.
+        ### TODO: that check is fragile; find a sturdier way to tell Castro from MAESTRO
         pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
+        if not os.path.exists(pfn):
+            return False
         castro = any(("castro." in line for line in open(pfn)))
         nyx = any(("nyx." in line for line in open(pfn)))
         castro = castro and (not nyx) # it's only castro if it's not nyx
@@ -559,35 +577,37 @@
 
     def _parse_parameter_file(self):
         """
-        Parses the parameter file and establishes the various
-        dictionaries.
+        Parses the parameter file and establishes the various dictionaries.
+
         """
+        # Boxlib madness
         self.fullplotdir = os.path.abspath(self.parameter_filename)
         self._parse_header_file()
-        self.parameter_filename = self._localize(
-                self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(
-                self.fparameter_filename, 'probin')
+        self.parameter_filename = self._localize(self.__ipfn, 'inputs')
+        self.fparameter_filename = self._localize(self.fparameter_filename, 'probin')
         if os.path.isfile(self.fparameter_filename):
             self._parse_fparameter_file()
             for param in self.fparameters:
                 if castro2enzoDict.has_key(param):
-                    self.parameters[castro2enzoDict[param]]=self.fparameters[param]
+                    self.parameters[castro2enzoDict[param]] = self.fparameters[param]
+
         # Let's read the file
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
         lines = open(self.parameter_filename).readlines()
         self.use_particles = False
-        for lineI, line in enumerate(lines):
+
+        for line in lines:
             if line.find("#") >= 1: # Keep the commented lines...
-                line=line[:line.find("#")]
-            line=line.strip().rstrip()
+                line = line[:line.find("#")]
+            line = line.strip().rstrip()
             if len(line) < 2 or line.find("#") == 0: # ...but skip comments
                 continue
+
             try:
                 param, vals = map(strip, map(rstrip, line.split("=")))
             except ValueError:
                 mylog.error("ValueError: '%s'", line)
+
             if castro2enzoDict.has_key(param):
                 paramName = castro2enzoDict[param]
                 t = map(parameterDict[paramName], vals.split())
@@ -598,13 +618,10 @@
                         self.parameters[paramName] = t[0]
                     else:
                         self.parameters[paramName] = t
-
             elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = \
-                    na.array([float(i) for i in vals.split()])
+                self.domain_right_edge = na.array([float(i) for i in vals.split()])
             elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = \
-                    na.array([float(i) for i in vals.split()])
+                self.domain_left_edge = na.array([float(i) for i in vals.split()])
             elif param.startswith("particles.write_in_plotfile"):
                 self.use_particles = boxlib_bool_to_int(vals)
 
@@ -613,33 +630,38 @@
         self.domain_dimensions = self.parameters["TopGridDimensions"]
         self.refine_by = self.parameters.get("RefineBy", 2)
 
-        if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
+        if (self.parameters.has_key("ComovingCoordinates") and
+            bool(self.parameters["ComovingCoordinates"])):
             self.cosmological_simulation = 1
             self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
             self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
             self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
-            a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
+
+            # Stupid that we have to read a separate file for this :/
+            a_file = open(os.path.join(self.fullplotdir, "comoving_a"))
             line = a_file.readline().strip()
             a_file.close()
-            self.parameters["CosmologyCurrentRedshift"] = 1/float(line) - 1
+
+            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
             self.cosmological_scale_factor = float(line)
             self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
         else:
+            ### TODO: make these defaults automatic
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
     def _parse_fparameter_file(self):
         """
-        Parses the fortran parameter file for Castro. Most of this will
-        be useless, but this is where it keeps mu = mass per
-        particle/m_hydrogen.
+        Parses the fortran parameter file for Castro. Most of this will be
+        useless, but this is where it keeps mu = mass per particle/m_hydrogen.
+
         """
         lines = open(self.fparameter_filename).readlines()
         for line in lines:
             if line.count("=") == 1:
                 param, vals = map(strip, map(rstrip, line.split("=")))
                 if vals.count("'") == 0:
-                    t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                    t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
                 else:
                     t = vals.split()
                 if len(t) == 1:
@@ -649,36 +671,39 @@
 
     def _parse_header_file(self):
         """
-        Parses the BoxLib header file to get any parameters stored
-        there. Hierarchy information is read out of this file in
-        CastroHierarchy.
+        Parses the BoxLib header file to get any parameters stored there.
+        Hierarchy information is read out of this file in CastroHierarchy.
 
         Currently, only Time is read here.
+
         """
-        header_file = open(os.path.join(self.fullplotdir,'Header'))
+        header_file = open(os.path.join(self.fullplotdir, "Header"))
         lines = header_file.readlines()
         header_file.close()
         n_fields = int(lines[1])
-        self.current_time = float(lines[3+n_fields])
-
-
+        self.current_time = float(lines[3 + n_fields])
 
     def _set_units(self):
         """
-        Generates the conversion to various physical _units based on the parameter file
+        Generates the conversion to various physical _units based on the
+        parameter file.
+
         """
         self.units = {}
         self.time_units = {}
+
         if len(self.parameters) == 0:
             self._parse_parameter_file()
+
         if self.cosmological_simulation:
-            cf = 1e5*(self.cosmological_scale_factor)
+            cf = 1e5 * self.cosmological_scale_factor   # 1e5 presumably converts km/s to cm/s
             for ax in 'xyz':
                 self.units['particle_velocity_%s' % ax] = cf
-            self.units['particle_mass'] = 1.989e33
+            self.units['particle_mass'] = 1.989e33  ### TODO: Make a global solar mass def
+
         mylog.warning("Setting 1.0 in code units to be 1.0 cm")
         if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            mylog.warning("No time units. Setting 1.0 = 1 second.")
             self.conversion_factors["Time"] = 1.0
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
@@ -688,8 +713,8 @@
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = 1 #self["Time"]
-        self.time_units['years'] = seconds / (365*3600*24.0)
-        self.time_units['days']  = seconds / (3600*24.0)
+        self.time_units['years'] = seconds / (365 * 3600 * 24.0)
+        self.time_units['days']  = seconds / (3600 * 24.0)
         for key in yt2castroFieldsDict:
             self.conversion_factors[key] = 1.0
         for key in castro_particle_field_names:


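_cache_endianness() above boils down to building a numpy dtype string from two facts in the FAB header: the byte order and the number of bytes per real. Once those are in hand, decoding grid data is a one-liner. A minimal sketch (the '<' here is hard-coded; the real method derives it from the header's byte-order map):

    import numpy as na

    bytes_per_real = 8
    byte_order = '<'  # little-endian, as read from the FAB header
    dtype = na.dtype(byte_order + 'f%i' % bytes_per_real)  # -> '<f8'
    raw = '\x00' * (2 * bytes_per_real)  # stand-in file payload
    print na.fromstring(raw, dtype=dtype)  # [ 0.  0.]
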
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/castro/fields.py
--- a/yt/frontends/castro/fields.py
+++ b/yt/frontends/castro/fields.py
@@ -21,106 +21,99 @@
 
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 """
-from yt.utilities.physical_constants import \
-    mh, kboltz
+
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    TranslationFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
     ValidateSpatial, \
     ValidateGridType
 import yt.data_objects.universal_fields
+from yt.utilities.physical_constants import mh, kboltz
 
-class CastroFieldContainer(CodeFieldInfoContainer):
-    """
-    All Castro-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-CastroFieldInfo = CastroFieldContainer()
-add_castro_field = CastroFieldInfo.add_field
+translation_dict = {
+    "x-velocity": "xvel",
+    "y-velocity": "yvel",
+    "z-velocity": "zvel",
+    "Density": "density",
+    "Total_Energy": "eden",
+    "Temperature": "temperature",
+    "x-momentum": "xmom",
+    "y-momentum": "ymom",
+    "z-momentum": "zmom"
+}
 
+# Set up containers for fields possibly present in the output files
+KnownCastroFields = FieldInfoContainer()
+add_castro_field = KnownCastroFields.add_field
 
-add_field = add_castro_field
+# ... and a container for fields that are always derived
+CastroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CastroFieldInfo.add_field
 
-# def _convertDensity(data):
-#     return data.convert("Density")
-add_field("density", function=lambda a, b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-CastroFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-#CastroFieldInfo["density"]._convert_function=_convertDensity
+# Start adding fields
+add_castro_field("density", function=NullFunc, take_log=True,
+                 units=r"\rm{g}/\rm{cm}^3")
 
-add_field("eden", function=lambda a, b: None, take_log=True,
-          validators = [ValidateDataField("eden")],
-          units=r"\rm{erg}/\rm{cm}^3")
+# fix projected units
+KnownCastroFields["density"]._projected_units = r"\rm{g}/\rm{cm}^2"
 
-add_field("xmom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("xmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_castro_field("eden", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("eden")],
+                 units=r"\rm{erg}/\rm{cm}^3")
 
-add_field("ymom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("ymom")],
-          units=r"\rm{gm}/\rm{cm^2\ s}")
+add_castro_field("xmom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("xmom")],
+                 units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_field("zmom", function=lambda a, b: None, take_log=False,
-          validators = [ValidateDataField("zmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_castro_field("ymom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("ymom")],
+                 units=r"\rm{gm}/\rm{cm^2\ s}")
 
-translation_dict = {"x-velocity": "xvel",
-                    "y-velocity": "yvel",
-                    "z-velocity": "zvel",
-                    "Density": "density",
-                    "Total_Energy": "eden",
-                    "Temperature": "temperature",
-                    "x-momentum": "xmom",
-                    "y-momentum": "ymom",
-                    "z-momentum": "zmom"
-                   }
+add_castro_field("zmom", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("zmom")],
+                 units=r"\rm{g}/\rm{cm^2\ s}")
 
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
+# Now populate derived fields
+for mine, theirs in translation_dict.items():
+    if KnownCastroFields.has_key(theirs):
+        add_field(theirs, function=TranslationFunc(mine),
+                  take_log=KnownCastroFields[theirs].take_log)
 
-for f, v in translation_dict.items():
-    if v not in CastroFieldInfo:
-        add_field(v, function=lambda a, b: None, take_log=False,
-                  validators = [ValidateDataField(v)])
-    #print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
+# Now fallbacks, in case these fields are not output
+def _xVelocity(field, data):
+    """ Generate x-velocity from x-momentum and density. """
+    return data["xmom"] / data["density"]
 
-def _xVelocity(field, data):
-    """generate x-velocity from x-momentum and density
-
-    """
-    return data["xmom"]/data["density"]
 add_field("x-velocity", function=_xVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _yVelocity(field, data):
-    """generate y-velocity from y-momentum and density
+    """ Generate y-velocity from y-momentum and density. """
+    return data["ymom"] / data["density"]
 
-    """
-    #try:
-    #    return data["xvel"]
-    #except KeyError:
-    return data["ymom"]/data["density"]
 add_field("y-velocity", function=_yVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _zVelocity(field, data):
-    """generate z-velocity from z-momentum and density
+    """ Generate z-velocity from z-momentum and density. """
+    return data["zmom"] / data["density"]
 
-    """
-    return data["zmom"]/data["density"]
 add_field("z-velocity", function=_zVelocity, take_log=False,
           units=r'\rm{cm}/\rm{s}')
 
 def _ThermalEnergy(field, data):
-    """generate thermal (gas energy). Dual Energy Formalism was
-        implemented by Stella, but this isn't how it's called, so I'll
-        leave that commented out for now.
+    """
+    Generate thermal energy (gas energy). The Dual Energy Formalism was
+    implemented by Stella, but this isn't how it is invoked here, so it is
+    left commented out for now.
+
     """
     #if data.pf["DualEnergyFormalism"]:
     #    return data["Gas_Energy"]
@@ -129,26 +122,59 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
+
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field, data):
-    """M{(Gamma-1.0)*e, where e is thermal energy density
-       NB: this will need to be modified for radiation
     """
-    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+    P = (Gamma - 1.0) * e, where e is the thermal energy density.
+
+    NB: this will need to be modified for radiation.
+
+    """
+    return (data.pf["Gamma"] - 1.0) * data["ThermalEnergy"]
+
 add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field, data):
-    return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}", take_log=False)
+    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
+            data["ThermalEnergy"] / (kboltz * data["Density"]))
+
+add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
+          take_log=False)
 
 def _convertParticleMassMsun(data):
-    return 1.0/1.989e33
+    return 1.0 / 1.989e33
 def _ParticleMassMsun(field, data):
     return data["particle_mass"]
+
 add_field("ParticleMassMsun",
           function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
           particle_type=True, convert_function=_convertParticleMassMsun,
           particle_convert_function=_ParticleMassMsun)
 
+# Fundamental fields that are usually/always output:
+#   density
+#   xmom
+#   ymom
+#   zmom
+#   rho_E
+#   rho_e
+#   Temp
+#
+# "Derived" fields that are sometimes output:
+#   x_velocity
+#   y_velocity
+#   z_velocity
+#   magvel
+#   grav_x
+#   grav_y
+#   grav_z
+#   maggrav
+#   magvort
+#   pressure
+#   entropy
+#   divu
+#   eint_e (e as derived from the "rho e" variable)
+#   eint_E (e as derived from the "rho E" variable)

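The Known*/FieldInfo split introduced above repeats across every frontend in
this changeset: fields actually present in the output go into a plain
FieldInfoContainer, while derived fields live in a container created with
create_with_fallback(FieldInfo), so lookups that miss locally fall through to
the universal field list.  A toy illustration of just the lookup semantics
(not yt's actual implementation):

    # Toy fallback container; this only mirrors the lookup behavior.
    class FallbackDict(dict):
        def __init__(self, fallback):
            dict.__init__(self)
            self.fallback = fallback
        def __missing__(self, key):
            # Called on a local miss; defer to the fallback container.
            return self.fallback[key]

    universal = {"CellVolume": "universal definition"}
    frontend = FallbackDict(universal)
    frontend["density"] = "frontend definition"

    assert frontend["density"] == "frontend definition"
    assert frontend["CellVolume"] == "universal definition"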

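The _Temperature definition above is the ideal-gas relation
T = (Gamma - 1) mu m_H e / (k_B rho), with e the thermal energy density.  A
quick numeric sanity check with cgs constants (values as in
yt.utilities.physical_constants; the inputs are made up):

    mh, kboltz = 1.673e-24, 1.381e-16   # g, erg/K
    gamma, mu = 5.0 / 3.0, 0.6
    density = 1.673e-24                 # g/cm^3, about one H atom per cm^3
    thermal_energy = 2.0e-12            # erg/cm^3, made-up value

    T = (gamma - 1.0) * mu * mh * thermal_energy / (kboltz * density)
    assert 5.7e3 < T < 5.9e3            # a few thousand Kelvin, as expected
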
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/chombo/api.py
--- a/yt/frontends/chombo/api.py
+++ b/yt/frontends/chombo/api.py
@@ -34,7 +34,6 @@
       ChomboStaticOutput
 
 from .fields import \
-      ChomboFieldContainer, \
       ChomboFieldInfo, \
       add_chombo_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -55,7 +55,9 @@
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
      parallel_root_only
 
-from .fields import ChomboFieldContainer
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import ChomboFieldInfo, KnownChomboFields
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -92,7 +94,6 @@
         self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
         self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
         self.data_style = data_style
-        self.field_info = ChomboFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
@@ -162,9 +163,6 @@
                 g1.Parent.append(g)
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -176,7 +174,8 @@
 
 class ChomboStaticOutput(StaticOutput):
     _hierarchy_class = ChomboHierarchy
-    _fieldinfo_class = ChomboFieldContainer
+    _fieldinfo_fallback = ChomboFieldInfo
+    _fieldinfo_known = KnownChomboFields
     
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
@@ -185,7 +184,6 @@
         self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
         
     def _set_units(self):
         """


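Every frontend's StaticOutput now declares the same two class attributes,
_fieldinfo_fallback and _fieldinfo_known, instead of instantiating a
container per parameter file.  A toy of how a base class can consume them
(yt's actual resolution logic is more involved):

    # Toy sketch; plain dicts stand in for the field-info containers.
    class ToyStaticOutput(object):
        _fieldinfo_fallback = None   # derived fields, with fallback
        _fieldinfo_known = None      # fields known to exist on disk

        def resolve(self, field):
            if field in self._fieldinfo_known:
                return "known", self._fieldinfo_known[field]
            return "derived", self._fieldinfo_fallback[field]

    class ToyChombo(ToyStaticOutput):
        _fieldinfo_known = {"density": "on-disk"}
        _fieldinfo_fallback = {"Entropy": "derived"}

    assert ToyChombo().resolve("density") == ("known", "on-disk")
    assert ToyChombo().resolve("Entropy") == ("derived", "derived")
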
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -24,7 +24,9 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,47 +34,48 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class ChomboFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
+KnownChomboFields = FieldInfoContainer()
+add_chombo_field = KnownChomboFields.add_field
+
+ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_chombo_field = ChomboFieldInfo.add_field
 
 add_field = add_chombo_field
 
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators=[ValidateDataField("density")],
-          units=r"\rm{g} / \rm{cm}^3")
+add_field("density", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("density")],
+          units=r"\rm{g}/\rm{cm}^3")
+
 ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("X-Momentum")],
-          units=r"", display_name=r"x momentum")
+add_field("X-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("X-Momentum")],
+          units=r"",display_name=r"B_x")
 ChomboFieldInfo["X-momentum"]._projected_units=r""
 
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Y-Momentum")],
-          units=r"", display_name=r"y momentum")
+add_field("Y-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Y-Momentum")],
+          units=r"",display_name=r"B_y")
 ChomboFieldInfo["Y-momentum"]._projected_units=r""
 
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Z-Momentum")],
-          units=r"", display_name=r"z momentum")
+add_field("Z-momentum", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Z-Momentum")],
+          units=r"",display_name=r"B_z")
 ChomboFieldInfo["Z-momentum"]._projected_units=r""
 
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("X-Magnfield")],
-          units=r"", display_name=r"B_x")
+add_field("X-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("X-Magnfield")],
+          units=r"",display_name=r"B_x")
 ChomboFieldInfo["X-magnfield"]._projected_units=r""
 
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Y-Magnfield")],
-          units=r"", display_name=r"B_y")
+add_field("Y-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Y-Magnfield")],
+          units=r"",display_name=r"B_y")
 ChomboFieldInfo["Y-magnfield"]._projected_units=r""
 
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
-          validators=[ValidateDataField("Z-Magnfield")],
-          units=r"", display_name=r"B_z")
+add_field("Z-magnfield", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("Z-Magnfield")],
+          units=r"",display_name=r"B_z")
 ChomboFieldInfo["Z-magnfield"]._projected_units=r""
 
 def _MagneticEnergy(field,data):


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/enzo/api.py
--- a/yt/frontends/enzo/api.py
+++ b/yt/frontends/enzo/api.py
@@ -39,8 +39,9 @@
       EnzoStaticOutputInMemory
 
 from .fields import \
-      EnzoFieldContainer, \
       EnzoFieldInfo, \
+      Enzo2DFieldInfo, \
+      Enzo1DFieldInfo, \
       add_enzo_field, \
       add_enzo_1d_field, \
       add_enzo_2d_field


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -45,13 +45,17 @@
     AMRHierarchy
 from yt.data_objects.static_output import \
     StaticOutput
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.definitions import mpc_conversion
 from yt.utilities import hdf5_light_reader
 from yt.utilities.logger import ytLogger as mylog
 
 from .definitions import parameterDict
-from .fields import EnzoFieldContainer, Enzo1DFieldContainer, \
-    Enzo2DFieldContainer, add_enzo_field
+from .fields import \
+    EnzoFieldInfo, Enzo2DFieldInfo, Enzo1DFieldInfo, \
+    add_enzo_field, add_enzo_2d_field, add_enzo_1d_field, \
+    KnownEnzoFields
 
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_blocking_call
@@ -127,6 +131,56 @@
     def set_filename(self, filename):
         pass
 
+class EnzoGridGZ(EnzoGrid):
+
+    __slots__ = ()
+
+    def retrieve_ghost_zones(self, n_zones, fields, all_levels=False,
+                             smoothed=False):
+        # We ignore smoothed in this case.
+        if n_zones > 3:
+            return EnzoGrid.retrieve_ghost_zones(
+                self, n_zones, fields, all_levels, smoothed)
+        # ----- Below is mostly the original code, except we remove the field
+        # ----- access section
+        # We will attempt this by creating a datacube that is exactly bigger
+        # than the grid by nZones*dx in each direction
+        nl = self.get_global_startindex() - n_zones
+        nr = nl + self.ActiveDimensions + 2*n_zones
+        new_left_edge = nl * self.dds + self.pf.domain_left_edge
+        new_right_edge = nr * self.dds + self.pf.domain_left_edge
+        # Something different needs to be done for the root grid, though
+        level = self.Level
+        args = (level, new_left_edge, new_right_edge)
+        kwargs = {'dims': self.ActiveDimensions + 2*n_zones,
+                  'num_ghost_zones':n_zones,
+                  'use_pbar':False}
+        # This should update the arguments to set the field parameters to be
+        # those of this grid.
+        kwargs.update(self.field_parameters)
+        if smoothed:
+            #cube = self.hierarchy.smoothed_covering_grid(
+            #    level, new_left_edge, new_right_edge, **kwargs)
+            cube = self.hierarchy.smoothed_covering_grid(
+                level, new_left_edge, **kwargs)
+        else:
+            cube = self.hierarchy.covering_grid(
+                level, new_left_edge, **kwargs)
+        # ----- This is EnzoGrid.get_data, duplicated here mostly for
+        # ----- efficiency's sake.
+        # The raw data carries 3 ghost zones per side; trim down to n_zones.
+        sl = [slice(3 - n_zones, -(3 - n_zones)) for _ in range(3)]
+        if fields is None: return cube
+        for field in ensure_list(fields):
+            if field in self.hierarchy.field_list:
+                conv_factor = 1.0
+                if self.pf.field_info.has_key(field):
+                    conv_factor = self.pf.field_info[field]._convert_function(self)
+                if self.pf.field_info[field].particle_type: continue
+                temp = self.hierarchy.io._read_raw_data_set(self, field)
+                temp = temp.swapaxes(0, 2)
+                cube.field_data[field] = na.multiply(temp, conv_factor, temp)[sl]
+        return cube
+
 class EnzoHierarchy(AMRHierarchy):
 
     _strip_path = False
@@ -205,7 +259,11 @@
                 list_of_sets = []
             if len(list_of_sets) == 0 and rank == 3:
                 mylog.debug("Detected packed HDF5")
-                self.data_style = 'enzo_packed_3d'
+                if self.parameters.get("WriteGhostZones", 0) == 1:
+                    self.data_style = "enzo_packed_3d_gz"
+                    self.grid = EnzoGridGZ
+                else:
+                    self.data_style = 'enzo_packed_3d'
             elif len(list_of_sets) > 0 and rank == 3:
                 mylog.debug("Detected unpacked HDF5")
                 self.data_style = 'enzo_hdf5'
@@ -254,7 +312,9 @@
                     self.__pointer_handler(vv)
         pbar.finish()
         self._fill_arrays(ei, si, LE, RE, np)
-        self.grids = na.array(self.grids, dtype='object')
+        temp_grids = na.empty(self.num_grids, dtype='object')
+        temp_grids[:] = self.grids
+        self.grids = temp_grids
         self.filenames = fn
         self._store_binary_hierarchy()
         t2 = time.time()
@@ -406,25 +466,6 @@
         self.save_data(list(field_list),"/","DataFields",passthrough=True)
         self.field_list = list(field_list)
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            # Note that we call add_field on the field_info directly.  This
-            # will allow the same field detection mechanism to work for 1D, 2D
-            # and 3D fields.
-            self.pf.field_info.add_field(
-                    field, lambda a, b: None,
-                    convert_function=cf, take_log=False)
-            
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
         for field in self.parameter_file.field_info:
@@ -631,7 +672,8 @@
     Enzo-specific output, set at a fixed time.
     """
     _hierarchy_class = EnzoHierarchy
-    _fieldinfo_class = EnzoFieldContainer
+    _fieldinfo_fallback = EnzoFieldInfo
+    _fieldinfo_known = KnownEnzoFields
     def __init__(self, filename, data_style=None,
                  file_style = None,
                  parameter_override = None,
@@ -674,11 +716,9 @@
         if self["TopGridRank"] == 1: self._setup_1d()
         elif self["TopGridRank"] == 2: self._setup_2d()
 
-        self.field_info = self._fieldinfo_class()
-
     def _setup_1d(self):
         self._hierarchy_class = EnzoHierarchy1D
-        self._fieldinfo_class = Enzo1DFieldContainer
+        self._fieldinfo_fallback = Enzo1DFieldInfo
         self.domain_left_edge = \
             na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
@@ -686,7 +726,7 @@
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
-        self._fieldinfo_class = Enzo2DFieldContainer
+        self._fieldinfo_fallback = Enzo2DFieldInfo
         self.domain_left_edge = \
             na.concatenate([self["DomainLeftEdge"], [0.0]])
         self.domain_right_edge = \
@@ -938,8 +978,6 @@
 
         StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
 
-        self.field_info = self._fieldinfo_class()
-
     def _parse_parameter_file(self):
         enzo = self._obtain_enzo()
         self.basename = "cycle%08i" % (


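The index arithmetic in EnzoGridGZ.retrieve_ghost_zones is easy to check in
isolation: the covering grid is padded by n_zones cells per face, and the raw
on-disk data (written with 3 ghost zones when WriteGhostZones is on) is
trimmed down to that padding.  A sketch with made-up grid numbers:

    import numpy as np   # "na" in the diff is yt's alias for numpy

    start = np.array([8, 8, 8])       # hypothetical global start index
    dims = np.array([16, 16, 16])     # hypothetical ActiveDimensions
    n_zones = 2

    nl = start - n_zones              # padded left index
    nr = nl + dims + 2 * n_zones      # padded right index
    assert tuple(nr - nl) == tuple(dims + 2 * n_zones)

    # Raw data carries 3 ghost zones per face; trim it to n_zones.
    raw = np.zeros(dims + 6)
    sl = [slice(3 - n_zones, -(3 - n_zones)) for _ in range(3)]
    assert raw[tuple(sl)].shape == tuple(dims + 2 * n_zones)
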
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -26,7 +26,10 @@
 import numpy as na
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,42 +38,43 @@
 import yt.data_objects.universal_fields
 from yt.utilities.physical_constants import \
     mh
+from yt.funcs import *
+
 import yt.utilities.amr_utils as amr_utils
 
-class EnzoFieldContainer(CodeFieldInfoContainer):
-    """
-    This is a container for Enzo-specific fields.
-    """
-    _shared_state = {}
-    _field_list = {}
-EnzoFieldInfo = EnzoFieldContainer()
-add_enzo_field = EnzoFieldInfo.add_field
+EnzoFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = EnzoFieldInfo.add_field
 
-add_field = add_enzo_field
+KnownEnzoFields = FieldInfoContainer()
+add_enzo_field = KnownEnzoFields.add_field
 
-_speciesList = ["HI","HII","Electron",
-               "HeI","HeII","HeIII",
-               "H2I","H2II","HM",
-               "DI","DII","HDI","Metal","PreShock"]
-_speciesMass = {"HI":1.0,"HII":1.0,"Electron":1.0,
-                "HeI":4.0,"HeII":4.0,"HeIII":4.0,
-                "H2I":2.0,"H2II":2.0,"HM":1.0,
-                "DI":2.0,"DII":2.0,"HDI":3.0}
+_speciesList = ["HI", "HII", "Electron",
+                "HeI", "HeII", "HeIII",
+                "H2I", "H2II", "HM",
+                "DI", "DII", "HDI", "Metal", "PreShock"]
+_speciesMass = {"HI": 1.0, "HII": 1.0, "Electron": 1.0,
+                "HeI": 4.0, "HeII": 4.0, "HeIII": 4.0,
+                "H2I": 2.0, "H2II": 2.0, "HM": 1.0,
+                "DI": 2.0, "DII": 2.0, "HDI": 3.0}
 
 def _SpeciesComovingDensity(field, data):
     sp = field.name.split("_")[0] + "_Density"
     ef = (1.0 + data.pf.current_redshift)**3.0
-    return data[sp]/ef
+    return data[sp] / ef
+
 def _SpeciesFraction(field, data):
     sp = field.name.split("_")[0] + "_Density"
-    return data[sp]/data["Density"]
+    return data[sp] / data["Density"]
+
 def _SpeciesMass(field, data):
     sp = field.name.split("_")[0] + "_Density"
     return data[sp] * data["CellVolume"]
+
 def _SpeciesNumberDensity(field, data):
     species = field.name.split("_")[0]
     sp = field.name.split("_")[0] + "_Density"
-    return data[sp]/_speciesMass[species]
+    return data[sp] / _speciesMass[species]
+
 def _convertCellMassMsun(data):
     return 5.027854e-34 # g^-1
 def _ConvertNumberDensity(data):
@@ -118,10 +122,10 @@
           validators=ValidateDataField("SN_Colour"),
           projection_conversion="1")
 
-add_field("Cooling_Time", units=r"\rm{s}",
-          function=lambda a, b: None,
-          validators=ValidateDataField("Cooling_Time"),
-          projection_conversion="1")
+add_enzo_field("Cooling_Time", units=r"\rm{s}",
+               function=NullFunc,
+               validators=ValidateDataField("Cooling_Time"),
+               projection_conversion="1")
 
 def _ThermalEnergy(field, data):
     if data.pf["HydroMethod"] == 2:
@@ -154,7 +158,9 @@
 def _convertEnergy(data):
     return data.convert("x-velocity")**2.0
 
-add_field("GasEnergy", function=lambda a, b: None,
+add_enzo_field("GasEnergy", function=NullFunc,
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+add_enzo_field("Gas_Energy", function=NullFunc,
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
 def _Gas_Energy(field, data):
@@ -162,7 +168,12 @@
 add_field("Gas_Energy", function=_Gas_Energy,
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
-add_field("TotalEnergy", function=lambda a, b: None,
+# We set up fields for both TotalEnergy and Total_Energy in the known fields
+# lists.  Note that this does not mean these will be the definitions actually used.
+add_enzo_field("TotalEnergy", function=NullFunc,
+          display_name = "\mathrm{Total}\/\mathrm{Energy}",
+          units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
+add_enzo_field("Total_Energy", function=NullFunc,
           display_name = "\mathrm{Total}\/\mathrm{Energy}",
           units=r"\rm{ergs}/\rm{g}", convert_function=_convertEnergy)
 
@@ -221,38 +232,46 @@
 
 for field in _default_fields:
     dn = field.replace("_","\/")
-    add_field(field, function=lambda a, b: None, take_log=True,
+    add_enzo_field(field, function=NullFunc, take_log=True,
               display_name = dn,
-              validators=[ValidateDataField(field)], units=r"\rm{g}/\rm{cm}^3")
-EnzoFieldInfo["x-velocity"].projection_conversion='1'
-EnzoFieldInfo["y-velocity"].projection_conversion='1'
-EnzoFieldInfo["z-velocity"].projection_conversion='1'
+              validators=[ValidateDataField(field)], units=r"Unknown")
+KnownEnzoFields["x-velocity"].projection_conversion='1'
+KnownEnzoFields["y-velocity"].projection_conversion='1'
+KnownEnzoFields["z-velocity"].projection_conversion='1'
+
+def _convertBfield(data): 
+    return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
+for field in ['Bx','By','Bz']:
+    f = KnownEnzoFields[field]
+    f._convert_function=_convertBfield
+    f._units=r"\mathrm{Gau\ss}"
+    f.take_log=False
 
 # Now we override
 
 def _convertDensity(data):
     return data.convert("Density")
 for field in ["Density"] + [ "%s_Density" % sp for sp in _speciesList ]:
-    EnzoFieldInfo[field]._units = r"\rm{g}/\rm{cm}^3"
-    EnzoFieldInfo[field]._projected_units = r"\rm{g}/\rm{cm}^2"
-    EnzoFieldInfo[field]._convert_function=_convertDensity
+    KnownEnzoFields[field]._units = r"\rm{g}/\rm{cm}^3"
+    KnownEnzoFields[field]._projected_units = r"\rm{g}/\rm{cm}^2"
+    KnownEnzoFields[field]._convert_function=_convertDensity
 
-add_field("Dark_Matter_Density", function=lambda a,b: None,
+add_enzo_field("Dark_Matter_Density", function=NullFunc,
           convert_function=_convertDensity,
           validators=[ValidateDataField("Dark_Matter_Density"),
                       ValidateSpatial(0)],
           display_name = "Dark\ Matter\ Density",
           not_in_all = True)
 
-EnzoFieldInfo["Temperature"]._units = r"\rm{K}"
-EnzoFieldInfo["Temperature"].units = r"K"
-EnzoFieldInfo["Dust_Temperature"]._units = r"\rm{K}"
-EnzoFieldInfo["Dust_Temperature"].units = r"K"
+KnownEnzoFields["Temperature"]._units = r"\rm{K}"
+KnownEnzoFields["Temperature"].units = r"K"
+KnownEnzoFields["Dust_Temperature"]._units = r"\rm{K}"
+KnownEnzoFields["Dust_Temperature"].units = r"K"
 
 def _convertVelocity(data):
     return data.convert("x-velocity")
 for ax in ['x','y','z']:
-    f = EnzoFieldInfo["%s-velocity" % ax]
+    f = KnownEnzoFields["%s-velocity" % ax]
     f._units = r"\rm{cm}/\rm{s}"
     f._convert_function = _convertVelocity
     f.take_log = False
@@ -378,7 +397,7 @@
 def _convertBfield(data): 
     return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
-    f = EnzoFieldInfo[field]
+    f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
     f._units=r"\mathrm{Gauss}"
     f.take_log=False
@@ -390,17 +409,95 @@
 
 add_field("Bmag", function=_Bmag,display_name=r"|B|",units=r"\mathrm{Gauss}")
 
+# Particle functions
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return na.array([], dtype=dtype)
+        try:
+            return io._read_data_set(data, p_field).astype(dtype)
+        except io._read_exception:
+            pass
+        # This is bad.  But it's the best idea I have right now.
+        return data._read_data(p_field.replace("_"," ")).astype(dtype)
+    return _Particles
+for pf in ["type", "mass"] + \
+          ["position_%s" % ax for ax in 'xyz']:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_enzo_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
     
+def _convRetainInt(data):
+    return 1
+add_enzo_field("particle_index", function=particle_func("particle_index", "int64"),
+          validators = [ValidateSpatial(0)], particle_type=True,
+          convert_function=_convRetainInt)
+
+def _get_vel_convert(ax):
+    def _convert_p_vel(data):
+        return data.convert("%s-velocity" % ax)
+    return _convert_p_vel
+for ax in 'xyz':
+    pf = "particle_velocity_%s" % ax
+    pfunc = particle_func(pf)
+    cfunc = _get_vel_convert(ax)
+    add_enzo_field(pf, function=pfunc, convert_function=cfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+for pf in ["creation_time", "dynamical_time", "metallicity_fraction"]:
+    pfunc = particle_func(pf)
+    add_enzo_field(pf, function=pfunc,
+              validators = [ValidateSpatial(0),
+                            ValidateDataField(pf)],
+              particle_type=True)
+add_field("particle_mass", function=particle_func("particle_mass"),
+          validators=[ValidateSpatial(0)], particle_type=True)
+
+def _ParticleAge(field, data):
+    current_time = data.pf.current_time
+    return (current_time - data["creation_time"])
+def _convertParticleAge(data):
+    return data.convert("years")
+add_field("ParticleAge", function=_ParticleAge,
+          validators=[ValidateDataField("creation_time")],
+          particle_type=True, convert_function=_convertParticleAge)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64') * \
+                just_one(data["CellVolumeCode"].ravel())
+    # Note that we mandate grid-type here, so this is okay
+    return particles
+
+def _convertParticleMass(data):
+    return data.convert("Density")*(data.convert("cm")**3.0)
+def _IOLevelParticleMass(grid):
+    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
+    return cf
+def _convertParticleMassMsun(data):
+    return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
+def _IOLevelParticleMassMsun(grid):
+    dd = dict(particle_mass = na.ones(1), CellVolumeCode=grid["CellVolumeCode"])
+    cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
+    return cf
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True, convert_function=_convertParticleMass,
+          particle_convert_function=_IOLevelParticleMass)
+add_field("ParticleMassMsun",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True, convert_function=_convertParticleMassMsun,
+          particle_convert_function=_IOLevelParticleMassMsun)
+
 #
 # Now we do overrides for 2D fields
 #
 
-class Enzo2DFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = EnzoFieldContainer._field_list.copy()
-# We make a copy of the dict from the other, so we
-# can now update it...
-Enzo2DFieldInfo = Enzo2DFieldContainer()
+Enzo2DFieldInfo = FieldInfoContainer.create_with_fallback(EnzoFieldInfo)
 add_enzo_2d_field = Enzo2DFieldInfo.add_field
 
 def _CellArea(field, data):
@@ -438,12 +535,7 @@
 # Now we do overrides for 1D fields
 #
 
-class Enzo1DFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = EnzoFieldContainer._field_list.copy()
-# We make a copy of the dict from the other, so we
-# can now update it...
-Enzo1DFieldInfo = Enzo1DFieldContainer()
+Enzo1DFieldInfo = FieldInfoContainer.create_with_fallback(EnzoFieldInfo)
 add_enzo_1d_field = Enzo1DFieldInfo.add_field
 
 def _CellLength(field, data):
@@ -474,7 +566,7 @@
 def _convertBfield(data): 
     return na.sqrt(4*na.pi*data.convert("Density")*data.convert("x-velocity")**2)
 for field in ['Bx','By','Bz']:
-    f = EnzoFieldInfo[field]
+    f = KnownEnzoFields[field]
     f._convert_function=_convertBfield
     f._units=r"\mathrm{Gauss}"
     f.take_log=False


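particle_func above is a closure factory: one template produces a reader per
particle field, with the field name captured at definition time.  The pattern
in isolation (make_reader and its returned payload are illustrative, not yt
API):

    def make_reader(p_field, dtype='float64'):
        def _reader(data):
            # p_field is captured per make_reader call, so every field
            # gets its own reader without repeating this body.
            return (p_field, dtype)   # stand-in for the real I/O call
        return _reader

    readers = dict((name, make_reader(name))
                   for name in ["particle_mass", "particle_type"])
    assert readers["particle_mass"](None) == ("particle_mass", 'float64')
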
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -181,8 +181,8 @@
         mylog.debug("Finished read of %s", sets)
 
     def _read_data_set(self, grid, field):
-        return hdf5_light_reader.ReadData(grid.filename,
-                "/Grid%08i/%s" % (grid.id, field)).swapaxes(0,2)
+        return self.modify(hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field)))
 
     def _read_data_slice(self, grid, field, axis, coord):
         axis = _axis_ids[axis]
@@ -197,6 +197,22 @@
     def _read_exception(self):
         return (exceptions.KeyError, hdf5_light_reader.ReadingError)
 
+class IOHandlerPackedHDF5GhostZones(IOHandlerPackedHDF5):
+    _data_style = "enzo_packed_3d_gz"
+
+    def modify(self, field):
+        tr = field[3:-3,3:-3,3:-3].swapaxes(0,2)
+        return tr.copy() # To ensure contiguous
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        axis = _axis_ids[axis]
+        return hdf5_light_reader.ReadDataSlice(grid.filename, "/Grid%08i/%s" %
+                        (grid.id, field), axis, coord)[3:-3,3:-3].transpose()
+
+    def _read_raw_data_set(self, grid, field):
+        return hdf5_light_reader.ReadData(grid.filename,
+                "/Grid%08i/%s" % (grid.id, field))
+
 class IOHandlerInMemory(BaseIOHandler):
 
     _data_style = "enzo_inline"


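The ghost-zone modify() above trims 3 cells from every face and swaps the
Fortran-ordered axes; the trailing .copy() matters because a swapaxes view is
not contiguous.  A quick check with made-up dimensions:

    import numpy as np

    field = np.arange(10 * 12 * 14).reshape(10, 12, 14)
    tr = field[3:-3, 3:-3, 3:-3].swapaxes(0, 2)
    assert tr.shape == (8, 6, 4)            # axes 0 and 2 exchanged
    assert not tr.flags['C_CONTIGUOUS']     # still a strided view ...
    assert tr.copy().flags['C_CONTIGUOUS']  # ... until copy() repacks it
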
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/flash/api.py
--- a/yt/frontends/flash/api.py
+++ b/yt/frontends/flash/api.py
@@ -34,7 +34,6 @@
       FLASHStaticOutput
 
 from .fields import \
-      FLASHFieldContainer, \
       FLASHFieldInfo, \
       add_flash_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -40,9 +40,8 @@
 from yt.utilities.io_handler import \
     io_registry
 
-from .fields import \
-    FLASHFieldContainer, \
-    add_field
+from .fields import FLASHFieldInfo, add_flash_field, KnownFLASHFields
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
 
 class FLASHGrid(AMRGridPatch):
     _id_offset = 1
@@ -60,24 +59,19 @@
 class FLASHHierarchy(AMRHierarchy):
 
     grid = FLASHGrid
-    _handle = None
     
-    def __init__(self,pf,data_style='chombo_hdf5'):
+    def __init__(self, pf, data_style='flash_hdf5'):
         self.data_style = data_style
-        self.field_info = FLASHFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._handle = h5py.File(self.hierarchy_filename)
+        self._handle = pf._handle
 
         self.float_type = na.float64
         AMRHierarchy.__init__(self,pf,data_style)
 
-        self._handle.close()
-        self._handle = None
-
     def _initialize_data_storage(self):
         pass
 
@@ -102,7 +96,7 @@
     def _count_grids(self):
         try:
             self.num_grids = self.parameter_file._find_parameter(
-                "integer", "globalnumblocks", True, self._handle)
+                "integer", "globalnumblocks", True)
         except KeyError:
             self.num_grids = self._handle["/simulation parameters"][0][0]
         
@@ -114,9 +108,9 @@
         self.grid_right_edge[:] = f["/bounding box"][:,:,1]
         # Move this to the parameter file
         try:
-            nxb = pf._find_parameter("integer", "nxb", True, f)
-            nyb = pf._find_parameter("integer", "nyb", True, f)
-            nzb = pf._find_parameter("integer", "nzb", True, f)
+            nxb = pf._find_parameter("integer", "nxb", True)
+            nyb = pf._find_parameter("integer", "nyb", True)
+            nzb = pf._find_parameter("integer", "nzb", True)
         except KeyError:
             nxb, nyb, nzb = [int(f["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
@@ -152,22 +146,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            pfield = field.startswith("particle_")
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False,
-                      particle_type=pfield)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
         for field in self.parameter_file.field_info:
@@ -187,20 +165,21 @@
 
 class FLASHStaticOutput(StaticOutput):
     _hierarchy_class = FLASHHierarchy
-    _fieldinfo_class = FLASHFieldContainer
+    _fieldinfo_fallback = FLASHFieldInfo
+    _fieldinfo_known = KnownFLASHFields
     _handle = None
     
     def __init__(self, filename, data_style='flash_hdf5',
                  storage_filename = None,
                  conversion_override = None):
 
+        self._handle = h5py.File(filename, "r")
         if conversion_override is None: conversion_override = {}
         self._conversion_override = conversion_override
 
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-        self.field_info = self._fieldinfo_class()
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
@@ -273,26 +252,17 @@
         for unit in mpc_conversion.keys():
             self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
 
-    def _find_parameter(self, ptype, pname, scalar = False, handle = None):
-        # We're going to implement handle caching eventually
-        if handle is None:
-            close = False
-            handle = self._handle
-        if handle is None:
-            close = True
-            handle = h5py.File(self.parameter_filename, "r")
+    def _find_parameter(self, ptype, pname, scalar = False):
         nn = "/%s %s" % (ptype,
                 {False: "runtime parameters", True: "scalars"}[scalar])
-        for tpname, pval in handle[nn][:]:
+        for tpname, pval in self._handle[nn][:]:
             if tpname.strip() == pname:
                 return pval
-        if close: handle.close()
         raise KeyError(pname)
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
-        self._handle = h5py.File(self.parameter_filename, "r")
         if "file format version" in self._handle:
             self._flash_version = int(
                 self._handle["file format version"][:])
@@ -308,15 +278,15 @@
 
         # Determine domain dimensions
         try:
-            nxb = self._find_parameter("integer", "nxb", scalar = True, handle = self._handle)
-            nyb = self._find_parameter("integer", "nyb", scalar = True, handle = self._handle)
-            nzb = self._find_parameter("integer", "nzb", scalar = True, handle = self._handle)
+            nxb = self._find_parameter("integer", "nxb", scalar = True)
+            nyb = self._find_parameter("integer", "nyb", scalar = True)
+            nzb = self._find_parameter("integer", "nzb", scalar = True)
         except KeyError:
             nxb, nyb, nzb = [int(self._handle["/simulation parameters"]['n%sb' % ax])
                               for ax in 'xyz']
-        nblockx = self._find_parameter("integer", "nblockx", handle = self._handle)
-        nblocky = self._find_parameter("integer", "nblockx", handle = self._handle)
-        nblockz = self._find_parameter("integer", "nblockx", handle = self._handle)
+        nblockx = self._find_parameter("integer", "nblockx")
+        nblocky = self._find_parameter("integer", "nblocky")
+        nblockz = self._find_parameter("integer", "nblockz")
         self.domain_dimensions = \
             na.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
 
@@ -329,7 +299,7 @@
 
         try:
             use_cosmo = self._find_parameter("logical", "usecosmology") 
-        except KeyError:
+        except:
             use_cosmo = 0
 
         if use_cosmo == 1:
@@ -342,14 +312,16 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def __del__(self):
         self._handle.close()
-        self._handle = None
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
             fileh = h5py.File(args[0],'r')
             if "bounding box" in fileh["/"].keys():
+                fileh.close()
                 return True
         except:
             pass


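The reworked _find_parameter scans the (name, value) records FLASH stores
under groups like "/integer runtime parameters", comparing stripped names.  A
sketch of that lookup, with a plain list standing in for the h5py dataset
(the padded names mimic FLASH's fixed-width strings):

    records = [("nblockx             ", 4),
               ("nblocky             ", 4),
               ("nblockz             ", 4)]

    def find_parameter(records, pname):
        for tpname, pval in records:
            if tpname.strip() == pname:
                return pval
        raise KeyError(pname)

    assert find_parameter(records, "nblocky") == 4
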
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/flash/fields.py
--- a/yt/frontends/flash/fields.py
+++ b/yt/frontends/flash/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,13 +33,12 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class FLASHFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-FLASHFieldInfo = FLASHFieldContainer()
-add_flash_field = FLASHFieldInfo.add_field
 
-add_field = add_flash_field
+KnownFLASHFields = FieldInfoContainer()
+add_flash_field = KnownFLASHFields.add_field
+
+FLASHFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = FLASHFieldInfo.add_field
 
 # Common fields in FLASH: (Thanks to John ZuHone for this list)
 #


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -38,7 +38,7 @@
         BaseIOHandler.__init__(self, *args, **kwargs)
         # Now we cache the particle fields
         self.pf = pf
-        self._handle = h5py.File(self.pf.parameter_filename, "r")
+        self._handle = pf._handle
         try :
             particle_fields = [s[0].strip() for s in
                                self._handle["/particle names"][:]]
@@ -47,9 +47,6 @@
         except KeyError:
             self._particle_fields = {}
 
-    def __del__(self):
-        self._handle.close()
-            
     def _read_particles(self, fields_to_read, type, args, grid_list,
             count_list, conv_factors):
         pass


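The FLASH changes above move handle ownership in one direction: the
StaticOutput opens the file once in __init__ and closes it in __del__, while
the hierarchy and IO handler only borrow pf._handle.  The ownership pattern
in miniature (Owner and Borrower are illustrative names):

    import h5py

    class Owner(object):
        def __init__(self, filename):
            self._handle = h5py.File(filename, "r")
        def __del__(self):
            self._handle.close()

    class Borrower(object):
        def __init__(self, owner):
            # Borrowed reference: no close() here, so the borrower must
            # not outlive the owner.
            self._handle = owner._handle
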
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ b/yt/frontends/gadget/api.py
@@ -34,7 +34,6 @@
       GadgetStaticOutput
 
 from .fields import \
-      GadgetFieldContainer, \
       GadgetFieldInfo, \
       add_gadget_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ b/yt/frontends/gadget/data_structures.py
@@ -37,7 +37,9 @@
 from yt.data_objects.static_output import \
     StaticOutput
 
-from .fields import GadgetFieldContainer
+from .fields import GadgetFieldInfo, KnownGadgetFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 class GadgetGrid(AMRGridPatch):
     _id_offset = 0
@@ -69,7 +71,6 @@
     grid = GadgetGrid
 
     def __init__(self, pf, data_style='gadget_hdf5'):
-        self.field_info = GadgetFieldContainer()
         self.filename = pf.filename
         self.directory = os.path.dirname(pf.filename)
         self.data_style = data_style
@@ -135,19 +136,16 @@
             g._prepare_grid()
             g._setup_dx()
             
-        
-    def _setup_unknown_fields(self):
-        pass
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
 class GadgetStaticOutput(StaticOutput):
     _hierarchy_class = GadgetHierarchy
-    _fieldinfo_class = GadgetFieldContainer
+    _fieldinfo_fallback = GadgetFieldInfo
+    _fieldinfo_known = KnownGadgetFields
+
     def __init__(self, filename,storage_filename=None) :
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
         self.filename = filename
         
         StaticOutput.__init__(self, filename, 'gadget_infrastructure')


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ b/yt/frontends/gadget/fields.py
@@ -27,7 +27,8 @@
 
 from yt.funcs import *
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,10 +36,7 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class GadgetFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-GadgetFieldInfo = GadgetFieldContainer()
+GadgetFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_gadget_field = GadgetFieldInfo.add_field
 
 add_field = add_gadget_field


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gdf/api.py
--- a/yt/frontends/gdf/api.py
+++ b/yt/frontends/gdf/api.py
@@ -27,16 +27,17 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
-
 from .data_structures import \
-      ChomboGrid, \
-      ChomboHierarchy, \
-      ChomboStaticOutput
+      GDFGrid, \
+      GDFHierarchy, \
+      GDFStaticOutput
 
 from .fields import \
-      ChomboFieldContainer, \
-      ChomboFieldInfo, \
-      add_chombo_field
+      GDFFieldInfo, \
+      KnownGDFFields, \
+      add_gdf_field
 
 from .io import \
-      IOHandlerChomboHDF5
+      IOHandlerGDFHDF5
+
+


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -24,6 +24,9 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import h5py
+import numpy as na
+import weakref
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
            AMRGridPatch
@@ -32,7 +35,10 @@
 from yt.data_objects.static_output import \
            StaticOutput
 
-from .fields import GDFFieldContainer
+from .fields import GDFFieldInfo, KnownGDFFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 class GDFGrid(AMRGridPatch):
     _id_offset = 0
@@ -66,6 +72,7 @@
     
     def __init__(self, pf, data_style='grid_data_format'):
         self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
@@ -78,8 +85,7 @@
         pass
 
     def _detect_fields(self):
-        ncomp = int(self._fhandle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+        self.field_list = self._fhandle['field_types'].keys()
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -87,37 +93,31 @@
         self.object_types.sort()
 
     def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._fhandle[lev]['Processors'].len()
+        self.num_grids = self._fhandle['/grid_parent_id'].shape[0]
         
     def _parse_hierarchy(self):
-        f = self._fhandle # shortcut
+        f = self._fhandle 
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
         levels = f.listnames()[1:]
-        self.grids = []
-        i = 0
-        for lev in levels:
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            boxes = f[lev]['boxes'].value
-            dx = f[lev].attrs['dx']
-            for level_id, box in enumerate(boxes):
-                si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
-                ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(self.grids),self,level=level_number,
-                               start = si, stop = ei)
-                self.grids.append(pg)
-                self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type)
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type) + 1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        temp_grids = na.empty(len(grids), dtype='object')
-        for gi, g in enumerate(self.grids): temp_grids[gi] = g
-        self.grids = temp_grids
+        dxs = []
+        self.grids = na.empty(self.num_grids, dtype='object')
+        for i, grid in enumerate(f['data'].keys()):
+            self.grids[i] = self.grid(i, self, f['grid_level'][i],
+                                      f['grid_left_index'][i],
+                                      f['grid_dimensions'][i])
+            self.grids[i]._level_id = f['grid_level'][i]
+
+            dx = (self.parameter_file.domain_right_edge -
+                  self.parameter_file.domain_left_edge) / self.parameter_file.domain_dimensions
+            dx = dx / self.parameter_file.refine_by**(f['grid_level'][i])
+            dxs.append(dx)
+        dx = na.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*f['grid_left_index'][:]
+        self.grid_dimensions = f['grid_dimensions'][:].astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = f['grid_particle_count'][:]
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -144,16 +144,14 @@
 
 class GDFStaticOutput(StaticOutput):
     _hierarchy_class = GDFHierarchy
-    _fieldinfo_class = GDFFieldContainer
+    _fieldinfo_fallback = GDFFieldInfo
+    _fieldinfo_known = KnownGDFFields
     
     def __init__(self, filename, data_style='grid_data_format',
                  storage_filename = None):
         StaticOutput.__init__(self, filename, data_style)
-        self._handle = h5py.File(self.filename, "r")
         self.storage_filename = storage_filename
-        self.field_info = self._fieldinfo_class()
-        self._handle.close()
-        del self._handle
+        self.filename = filename
         
     def _set_units(self):
         """
@@ -165,24 +163,31 @@
             self._parse_parameter_file()
         self.time_units['1'] = 1
         self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_right_edge).max()
+        self.units['cm'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = 1
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
         # This should be improved.
+        self._handle = h5py.File(self.parameter_filename, "r")
         for field_name in self._handle["/field_types"]:
-            self.units[field_name] = self._handle["/%s/field_to_cgs" % field_name]
-
+            self.units[field_name] = self._handle["/field_types/%s" % field_name].attrs['field_to_cgs']
+        self._handle.close()
+        del self._handle
+        
     def _parse_parameter_file(self):
+        self._handle = h5py.File(self.parameter_filename, "r")
         sp = self._handle["/simulation_parameters"].attrs
         self.domain_left_edge = sp["domain_left_edge"][:]
         self.domain_right_edge = sp["domain_right_edge"][:]
-        self.refine_by = sp["refine_by"][:]
-        self.dimensionality = sp["dimensionality"][:]
-        self.current_time = sp["current_time"][:]
+        self.domain_dimensions = sp["domain_dimensions"][:]
+        self.refine_by = sp["refine_by"]
+        self.dimensionality = sp["dimensionality"]
+        self.current_time = sp["current_time"]
         self.unique_identifier = sp["unique_identifier"]
         self.cosmological_simulation = sp["cosmological_simulation"]
         if sp["num_ghost_zones"] != 0: raise RuntimeError
+        self.num_ghost_zones = sp["num_ghost_zones"]
         self.field_ordering = sp["field_ordering"]
         self.boundary_conditions = sp["boundary_conditions"][:]
         if self.cosmological_simulation:
@@ -193,7 +198,10 @@
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
-        
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+        del self._handle
+            
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -204,4 +212,6 @@
             pass
         return False
 
-
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+        


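The rebuilt GDF hierarchy derives every grid's edges from integers alone:
cell width at a level is the domain width over domain_dimensions, divided by
refine_by**level, and the edges follow from grid_left_index and
grid_dimensions.  Worked with made-up values for one level-1 grid:

    import numpy as np

    domain_left, domain_right = np.zeros(3), np.ones(3)
    domain_dims = np.array([32, 32, 32])
    refine_by, level = 2, 1
    grid_left_index = np.array([16, 16, 16])
    grid_dims = np.array([16, 16, 16])

    dx = (domain_right - domain_left) / domain_dims / refine_by**level
    left_edge = domain_left + dx * grid_left_index
    right_edge = left_edge + dx * grid_dims

    assert np.allclose(left_edge, 0.25)    # 16 cells of width 1/64
    assert np.allclose(right_edge, 0.5)
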
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gdf/fields.py
--- a/yt/frontends/gdf/fields.py
+++ b/yt/frontends/gdf/fields.py
@@ -1,5 +1,5 @@
 """
-Chombo-specific fields
+GDF-specific fields
 
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -24,90 +24,74 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
     ValidateSpatial, \
-    ValidateGridType
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
 import yt.data_objects.universal_fields
 
-class ChomboFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-ChomboFieldInfo = ChomboFieldContainer()
-add_chombo_field = ChomboFieldInfo.add_field
+log_translation_dict = {"Density": "density",
+                        "Pressure": "pressure"}
 
-add_field = add_chombo_field
+translation_dict = {"x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z"}
+                    
+# translation_dict = {"mag_field_x": "cell_centered_B_x ",
+#                     "mag_field_y": "cell_centered_B_y ",
+#                     "mag_field_z": "cell_centered_B_z "}
 
-add_field("density", function=lambda a,b: None, take_log=True,
+GDFFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = GDFFieldInfo.add_field
+
+KnownGDFFields = FieldInfoContainer()
+add_gdf_field = KnownGDFFields.add_field
+
+add_gdf_field("density", function=NullFunc, take_log=True,
           validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
+          units=r"\rm{g}/\rm{cm}^3",
+          projected_units =r"\rm{g}/\rm{cm}^2")
 
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_gdf_field("specific_energy", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("specific_energy")],
+          units=r"\rm{erg}/\rm{g}")
 
-add_field("X-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("X-Momentum")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_gdf_field("pressure", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("pressure")],
+          units=r"\rm{erg}/\rm{g}")
 
-add_field("Y-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Y-Momentum")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_gdf_field("velocity_x", function=NullFunc, take_log=True,
+          validators = [ValidateDataField("velocity_x")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Z-momentum", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Z-Momentum")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_gdf_field("velocity_y", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("velocity_y")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("X-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("X-Magnfield")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_gdf_field("velocity_z", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("velocity_z")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Y-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Y-Magnfield")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_gdf_field("mag_field_x", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_x")],
+          units=r"\rm{cm}/\rm{s}")
 
-add_field("Z-magnfield", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("Z-Magnfield")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_gdf_field("mag_field_y", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_y")],
+          units=r"\rm{cm}/\rm{s}")
 
-def _MagneticEnergy(field,data):
-    return (data["X-magnfield"]**2 +
-            data["Y-magnfield"]**2 +
-            data["Z-magnfield"]**2)/2.
-add_field("MagneticEnergy", function=_MagneticEnergy, take_log=True,
-          units=r"",display_name=r"B^2/8\pi")
-ChomboFieldInfo["MagneticEnergy"]._projected_units=r""
+add_gdf_field("mag_field_z", function=NullFunc, take_log=False,
+          validators = [ValidateDataField("mag_field_z")],
+          units=r"\rm{cm}/\rm{s}")
 
-def _xVelocity(field, data):
-    """generate x-velocity from x-momentum and density
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
 
-    """
-    return data["X-momentum"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
 
-def _yVelocity(field,data):
-    """generate y-velocity from y-momentum and density
-
-    """
-    #try:
-    #    return data["xvel"]
-    #except KeyError:
-    return data["Y-momentum"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field,data):
-    """generate z-velocity from z-momentum and density
-
-    """
-    return data["Z-momentum"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-    


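The translation dicts above rely on TranslationFunc to alias yt field names to on-disk GDF names. Roughly (a sketch of the behavior, not a verbatim copy of field_info_container.py), it just closes over the target name:

def TranslationFunc(field_name):
    # Build a field function that forwards lookups to another field.
    def _translate(field, data):
        return data[field_name]
    return _translate

With that, add_field("Density", TranslationFunc("density"), take_log=True) makes "Density" a logged alias for the on-disk "density" field.
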
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/gdf/io.py
--- a/yt/frontends/gdf/io.py
+++ b/yt/frontends/gdf/io.py
@@ -25,45 +25,48 @@
 """
 from yt.utilities.io_handler import \
            BaseIOHandler
+import h5py
 
-class IOHandlerChomboHDF5(BaseIOHandler):
-    _data_style = "chombo_hdf5"
+class IOHandlerGDFHDF5(BaseIOHandler):
+    _data_style = "grid_data_format"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
     def _field_dict(self,fhandle):
-        ncomp = int(fhandle['/'].attrs['num_components'])
-        temp =  fhandle['/'].attrs.listitems()[-ncomp:]
-        val, keys = zip(*temp)
-        val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
+        keys = fhandle['field_types'].keys()
+        val = fhandle['field_types'].keys()
+        # ncomp = int(fhandle['/'].attrs['num_components'])
+        # temp =  fhandle['/'].attrs.listitems()[-ncomp:]
+        # val, keys = zip(*temp)
+        # val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
         
     def _read_field_names(self,grid):
         fhandle = h5py.File(grid.filename,'r')
-        ncomp = int(fhandle['/'].attrs['num_components'])
-        field_names = [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
-        fhandle.close()
-        return field_names
+        names = fhandle['field_types'].keys()
+        fhandle.close()
+        return names
     
     def _read_data_set(self,grid,field):
         fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+        data = fhandle['/data/grid_%010i/'%grid.id+field][:]
+        fhandle.close()
+        return data
+        # field_dict = self._field_dict(fhandle)
+        # lstring = 'level_%i' % grid.Level
+        # lev = fhandle[lstring]
+        # dims = grid.ActiveDimensions
+        # boxsize = dims.prod()
+        
+        # grid_offset = lev[self._offset_string][grid._level_id]
+        # start = grid_offset+field_dict[field]*boxsize
+        # stop = start + boxsize
+        # data = lev[self._data_string][start:stop]
 
-        field_dict = self._field_dict(fhandle)
-        lstring = 'level_%i' % grid.Level
-        lev = fhandle[lstring]
-        dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
-        grid_offset = lev[self._offset_string][grid._level_id]
-        start = grid_offset+field_dict[field]*boxsize
-        stop = start + boxsize
-        data = lev[self._data_string][start:stop]
-        fhandle.close()
-        return data.reshape(dims, order='F')
+        # return data.reshape(dims, order='F')
                                           
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]
         sl[axis] = slice(coord, coord + 1)
-        return self._read_data_set(grid,field)[sl]
+        fhandle = h5py.File(grid.hierarchy.hierarchy_filename,'r')
+        data = fhandle['/data/grid_%010i/'%grid.id+field][:][sl]
+        fhandle.close()
+        return data
 
+    # return self._read_data_set(grid,field)[sl]
+


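With the GDF layout, grid IO reduces to a direct dataset read. Standalone, the access pattern in _read_data_set looks like this (hypothetical filename and grid id):

import h5py

fhandle = h5py.File("example.gdf", "r")
# Each grid's fields live under /data/grid_XXXXXXXXXX/<field>.
density = fhandle["/data/grid_%010i/density" % 0][:]
fhandle.close()
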
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/maestro/api.py
--- a/yt/frontends/maestro/api.py
+++ b/yt/frontends/maestro/api.py
@@ -36,7 +36,6 @@
       MaestroStaticOutput
 
 from .fields import \
-      MaestroFieldContainer, \
       MaestroFieldInfo, \
       add_maestro_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ b/yt/frontends/maestro/data_structures.py
@@ -54,9 +54,12 @@
     yt2maestroFieldsDict, \
     maestro_FAB_header_pattern
 
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from .fields import \
-    MaestroFieldContainer, \
-    add_field
+    MaestroFieldInfo, \
+    add_maestro_field, \
+    KnownMaestroFields
 
 
 class MaestroGrid(AMRGridPatch):
@@ -118,7 +121,6 @@
 class MaestroHierarchy(AMRHierarchy):
     grid = MaestroGrid
     def __init__(self, pf, data_style='maestro'):
-        self.field_info = MaestroFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir,'Header')
@@ -391,21 +393,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -431,7 +418,8 @@
     *filename*, without looking at the Maestro hierarchy.
     """
     _hierarchy_class = MaestroHierarchy
-    _fieldinfo_class = MaestroFieldContainer
+    _fieldinfo_fallback = MaestroFieldInfo
+    _fieldinfo_known = KnownMaestroFields
 
     def __init__(self, plotname, paramFilename=None, 
                  data_style='maestro', paranoia=False,
@@ -455,7 +443,6 @@
         # this is the unit of time; NOT the current time
         self.parameters["Time"] = 1 # second
 
-        self.field_info = self._fieldinfo_class()
         self._parse_header_file()
 
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/maestro/fields.py
--- a/yt/frontends/maestro/fields.py
+++ b/yt/frontends/maestro/fields.py
@@ -27,7 +27,8 @@
 from yt.utilities.physical_constants import \
     mh, kboltz
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -35,17 +36,11 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class MaestroFieldContainer(CodeFieldInfoContainer):
-    """
-    All Maestro-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-MaestroFieldInfo = MaestroFieldContainer()
-add_maestro_field = MaestroFieldInfo.add_field
+KnownMaestroFields = FieldInfoContainer()
+add_maestro_field = KnownMaestroFields.add_field
 
-
-add_field = add_maestro_field
+MaestroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = MaestroFieldInfo.add_field
 
 add_field("density", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("density")],


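The same Known*Fields / fallback split appears in every frontend touched by this changeset. As a toy model of create_with_fallback (an assumption about its behavior, not yt's actual implementation), the derived container acts like a dict that defers missing keys to another container:

class FallbackDict(dict):
    # Toy stand-in for FieldInfoContainer.create_with_fallback.
    def __init__(self, fallback):
        dict.__init__(self)
        self.fallback = fallback
    def __contains__(self, key):
        return dict.__contains__(self, key) or key in self.fallback
    def __missing__(self, key):
        return self.fallback[key]

universal = {"Density": "universal definition"}
frontend = FallbackDict(universal)
frontend["density"] = "frontend-specific definition"
assert frontend["Density"] == "universal definition"
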
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/nyx/api.py
--- a/yt/frontends/nyx/api.py
+++ b/yt/frontends/nyx/api.py
@@ -25,5 +25,5 @@
 """
 
 from .data_structures import NyxGrid, NyxHierarchy, NyxStaticOutput
-from .fields import NyxFieldContainer, nyx_fields, add_nyx_field
+from .fields import NyxFieldInfo, KnownNyxFields, add_nyx_field
 from .io import IOHandlerNative


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/nyx/data_structures.py
--- a/yt/frontends/nyx/data_structures.py
+++ b/yt/frontends/nyx/data_structures.py
@@ -41,13 +41,15 @@
 from yt.data_objects.grid_patch import AMRGridPatch
 from yt.data_objects.hierarchy import AMRHierarchy
 from yt.data_objects.static_output import StaticOutput
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.amr_utils import get_box_grids_level
 from yt.utilities.definitions import mpc_conversion
 
 from .definitions import parameter_type_dict, nyx_to_enzo_dict, \
                          fab_header_pattern, nyx_particle_field_names
 from .utils import boxlib_bool_to_int
-from .fields import NyxFieldContainer, add_field
+from .fields import NyxFieldInfo, add_nyx_field, KnownNyxFields
 
 
 class NyxGrid(AMRGridPatch):
@@ -118,7 +120,6 @@
     grid = NyxGrid
 
     def __init__(self, pf, data_style="nyx_native"):
-        self.field_info = NyxFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         self.directory = pf.path
@@ -420,20 +421,6 @@
         return self.grids[mask]
 
     def _setup_field_list(self):
-        self.derived_field_list = []
-
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf=self.parameter_file)
-            except:
-                continue
-            available = na.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
         if self.parameter_file.use_particles:
             # We know which particle fields will exist -- pending further
             # changes in the future.
@@ -446,7 +433,7 @@
                 # Note that we call add_field on the field_info directly.  This
                 # will allow the same field detection mechanism to work for 1D,
                 # 2D and 3D fields.
-                self.pf.field_info.add_field(field, lambda a, b: None,
+                self.pf.field_info.add_field(field, NullFunc,
                                              convert_function=cf,
                                              take_log=False, particle_type=True)
 
@@ -468,23 +455,19 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        # not sure what the case for this is.
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+        for field in self.parameter_file.field_info:
+            try:
+                fd = self.parameter_file.field_info[field].get_dependencies(
+                            pf = self.parameter_file)
+            except:
+                continue
+            available = na.all([f in self.field_list for f in fd.requested])
+            if available: self.derived_field_list.append(field)
         for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None, convert_function=cf,
-                      take_log=False)
-
-    def _setup_derived_fields(self):
-        pass
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
 
     def _initialize_state_variables(self):
         """
@@ -509,7 +492,8 @@
 
     """
     _hierarchy_class = NyxHierarchy
-    _fieldinfo_class = NyxFieldContainer
+    _fieldinfo_fallback = NyxFieldInfo
+    _fieldinfo_known = KnownNyxFields
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
@@ -569,9 +553,6 @@
         # ``self.print_key_parameters()``
         StaticOutput.__init__(self, plotname.rstrip("/"), data_style=data_style)
 
-        # @todo: field pruning should happen here
-        self.field_info = self._fieldinfo_class()
-
         # @todo: check all of these and hopefully factor out of the constructor.
         # These should maybe not be hardcoded?
         self.parameters["HydroMethod"] = "nyx"  # always PPM DE


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/nyx/fields.py
--- a/yt/frontends/nyx/fields.py
+++ b/yt/frontends/nyx/fields.py
@@ -29,28 +29,26 @@
 
 import yt.data_objects.universal_fields
 
-from yt.data_objects.field_info_container import CodeFieldInfoContainer, \
+from yt.data_objects.field_info_container import FieldInfoContainer, \
+    NullFunc, TranslationFunc, FieldInfo, \
     ValidateParameter, ValidateDataField, ValidateProperty, ValidateSpatial, \
     ValidateGridType
 from yt.utilities.physical_constants import mh, kboltz
 
-class NyxFieldContainer(CodeFieldInfoContainer):
-    """ All nyx-specific fields are stored in here. """
-    _shared_state = {}
-    _field_list = {}
+NyxFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = NyxFieldInfo.add_field
 
-nyx_fields = NyxFieldContainer()
-add_field = nyx_fields.add_field
-add_nyx_field = add_field  # alias for API
+KnownNyxFields = FieldInfoContainer()
+add_nyx_field = KnownNyxFields.add_field 
 
 # Density
-add_field("density", function=lambda a, b: None, take_log=True,
+add_nyx_field("density", function=lambda a, b: None, take_log=True,
           validators=[ValidateDataField("density")],
           units=r"\rm{g}} / \rm{cm}^3",
           projected_units =r"\rm{g}} / \rm{cm}^2")
-nyx_fields["density"]._projected_units =r"\rm{g}} / \rm{cm}^2"
+KnownNyxFields["density"]._projected_units =r"\rm{g}} / \rm{cm}^2"
 
-add_field("Density", function=lambda a, b: b["density"], take_log=True,
+add_field("Density", function=TranslationFunc("density"), take_log=True,
           units=r"\rm{g}} / \rm{cm}^3",
           projected_units =r"\rm{g}} / \rm{cm}^2")
 
@@ -61,28 +59,30 @@
     return data["particle_mass"]
 add_field("ParticleMassMsun", function=_particle_mass_m_sun,
           validators=[ValidateSpatial(0), ValidateDataField("particle_mass")],
-          particle_type=True, convert_function=_convertParticleMassMsun, take_log=True, units=r"\rm{M_{\odot}}")
+          particle_type=True, convert_function=_convertParticleMassMsun,
+          take_log=True, units=r"\rm{M_{\odot}}")
           
-add_field("Dark_Matter_Density", function=lambda a, b: b["particle_mass_density"], take_log=True,
+add_nyx_field("Dark_Matter_Density", function=TranslationFunc("particle_mass_density"),
+          take_log=True,
           units=r"\rm{g}} / \rm{cm}^3",particle_type=True,
           projected_units =r"\rm{g}} / \rm{cm}^2")
 
 
 # Energy Density
 # @todo: ``energy_density``
-add_field("total_energy", function=lambda a, b: None, take_log=True,
+add_nyx_field("total_energy", function=lambda a, b: None, take_log=True,
           validators=[ValidateDataField("total_energy")],
           units=r"\rm{M_{\odot}} (\rm{km} / \rm{s})^2")
 
 # Momentum in each dimension.
 # @todo: ``momentum_x``
-add_field("x-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("x-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("x-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
-add_field("y-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("y-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("y-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
-add_field("z-momentum", function=lambda a, b: None, take_log=False,
+add_nyx_field("z-momentum", function=lambda a, b: None, take_log=False,
           validators=[ValidateDataField("z-momentum")],
           units=r"\rm{M_{\odot}} \rm{km} / \rm{s}")
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/nyx/io.py
--- a/yt/frontends/nyx/io.py
+++ b/yt/frontends/nyx/io.py
@@ -28,7 +28,7 @@
 
 import os
 import numpy as na
-from yt.utilities.amr_utils import read_castro_particles
+from yt.utilities.amr_utils import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import BaseIOHandler
 
 from definitions import fab_header_pattern, nyx_particle_field_names, \
@@ -57,80 +57,24 @@
         if field in nyx_particle_field_names:
             return self._read_particle_field(grid, field)
         filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen, 'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        """
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            header_re = re.compile(fab_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = \
-                headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian.
-            # @todo: this code is ugly.
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i' % bytesPerReal)  # always a floating point
-
-            # determine size of FAB
-            start = na.array(map(int, start.split(',')))
-            stop = na.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." % grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." % grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." % grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." % grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." % grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-        else:
-        """
-        start = grid.start_index
-        stop = grid.stop_index
-        dtype = grid.hierarchy._dtype
+        offset1 = grid._offset[field]
+        # one field has nElements * bytesPerReal bytes and is located
+        # nElements * bytesPerReal * field_index from the offset location
         bytesPerReal = grid.hierarchy._bytesPerReal
 
+        fieldname = yt_to_nyx_fields_dict.get(field, field)
+        field_index = grid.field_indexes[fieldname]
         nElements = grid.ActiveDimensions.prod()
+        offset2 = int(nElements*bytesPerReal*field_index)
 
-        # one field has nElements * bytesPerReal bytes and is located
-        # nElements * bytesPerReal * field_index from the offset location
-        if yt_to_nyx_fields_dict.has_key(field):
-            fieldname = yt_to_nyx_fields_dict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = na.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        dtype = grid.hierarchy._dtype
+        field = na.empty(nElements, dtype=dtype)
+        read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # @todo: we can/should also check against the max and min in the header
         # file
 
-        inFile.close()
         return field
 
     def _read_data_slice(self, grid, field, axis, coord):


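The read_and_seek call above collapses the old open/seek/readline/seek/fromfile sequence into one Cython helper. In plain Python the two offsets work out as follows (bytesPerReal=8 and a float64 dtype are assumptions here; the real values come from grid.hierarchy):

import numpy as na

def read_field(filename, offset1, field_index, nElements, bytesPerReal=8):
    inFile = open(filename, "rb")
    inFile.seek(offset1)      # offset1: start of this grid's FAB block
    inFile.readline()         # skip the FAB header line
    # offset2: each field sits nElements * bytesPerReal bytes further in
    inFile.seek(nElements * bytesPerReal * field_index, 1)
    field = na.fromfile(inFile, count=nElements, dtype="float64")
    inFile.close()
    return field
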
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/orion/api.py
--- a/yt/frontends/orion/api.py
+++ b/yt/frontends/orion/api.py
@@ -34,7 +34,6 @@
       OrionStaticOutput
 
 from .fields import \
-      OrionFieldContainer, \
       OrionFieldInfo, \
       add_orion_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/orion/data_structures.py
--- a/yt/frontends/orion/data_structures.py
+++ b/yt/frontends/orion/data_structures.py
@@ -23,46 +23,41 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+import os
 import re
-import os
 import weakref
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
 import numpy as na
 
-from collections import \
-    defaultdict
-from string import \
-    strip, \
-    rstrip
-from stat import \
-    ST_CTIME
-
 from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.data_objects.hierarchy import \
-           AMRHierarchy
-from yt.data_objects.static_output import \
-           StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.data_objects.hierarchy import AMRHierarchy
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import mpc_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
+    parallel_root_only
 
 from .definitions import \
     orion2enzoDict, \
     parameterDict, \
     yt2orionFieldsDict, \
     orion_FAB_header_pattern
-
 from .fields import \
-    OrionFieldContainer, \
-    add_field
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
 
 
 class OrionGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions,start,stop,paranoia=False,**kwargs):
-        AMRGridPatch.__init__(self, index,**kwargs)
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
+                 dimensions, start, stop, paranoia=False, **kwargs):
+        AMRGridPatch.__init__(self, index, **kwargs)
         self.filename = filename
         self._offset = offset
         self._paranoid = paranoia
@@ -122,7 +117,6 @@
 class OrionHierarchy(AMRHierarchy):
     grid = OrionGrid
     def __init__(self, pf, data_style='orion_native'):
-        self.field_info = OrionFieldContainer()
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir,'Header')
@@ -399,21 +393,6 @@
     def _detect_fields(self):
         pass
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
-
     def _setup_derived_fields(self):
         pass
 
@@ -439,7 +418,8 @@
     *filename*, without looking at the Orion hierarchy.
     """
     _hierarchy_class = OrionHierarchy
-    _fieldinfo_class = OrionFieldContainer
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
 
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='orion_native', paranoia=False,
@@ -461,7 +441,6 @@
 
         StaticOutput.__init__(self, plotname.rstrip("/"),
                               data_style='orion_native')
-        self.field_info = self._fieldinfo_class()
 
         # These should maybe not be hardcoded?
         self.parameters["HydroMethod"] = 'orion' # always PPM DE


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -25,7 +25,8 @@
 from yt.utilities.physical_constants import \
     mh, kboltz
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -33,25 +34,17 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class OrionFieldContainer(CodeFieldInfoContainer):
-    """
-    All Orion-specific fields are stored in here.
-    """
-    _shared_state = {}
-    _field_list = {}
-OrionFieldInfo = OrionFieldContainer()
-add_orion_field = OrionFieldInfo.add_field
 
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
 
-add_field = add_orion_field
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
 
-# def _convertDensity(data):
-#     return data.convert("Density")
 add_field("density", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("density")],
           units=r"\rm{g}/\rm{cm}^3")
 OrionFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-#OrionFieldInfo["density"]._convert_function=_convertDensity
 
 add_field("eden", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("eden")],


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/orion/io.py
--- a/yt/frontends/orion/io.py
+++ b/yt/frontends/orion/io.py
@@ -111,7 +111,7 @@
         field_index = grid.field_indexes[fieldname]
         inFile.seek(int(nElements*bytesPerReal*field_index),1)
         field = na.fromfile(inFile,count=nElements,dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
+        field = field.reshape(grid.ActiveDimensions, order='F')
 
         # we can/should also check against the max and min in the header file
 


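The one-line reshape change above is behavior-preserving: for a flat buffer holding Fortran-ordered grid data, reshape(dims[::-1]).swapaxes(0,2) and reshape(dims, order='F') produce identical arrays. A quick check:

import numpy as na

dims = (2, 3, 4)
arr = na.arange(24)
old = arr.reshape(dims[::-1]).swapaxes(0, 2)
new = arr.reshape(dims, order="F")
assert (old == new).all()
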
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ b/yt/frontends/ramses/api.py
@@ -34,7 +34,6 @@
       RAMSESStaticOutput
 
 from .fields import \
-      RAMSESFieldContainer, \
       RAMSESFieldInfo, \
       add_ramses_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -39,13 +39,15 @@
     import _ramses_reader
 except ImportError:
     _ramses_reader = None
-from .fields import RAMSESFieldContainer
+from .fields import RAMSESFieldInfo, KnownRAMSESFields
 from yt.utilities.definitions import \
     mpc_conversion
 from yt.utilities.amr_utils import \
     get_box_grids_level
 from yt.utilities.io_handler import \
     io_registry
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 
 def num_deep_inc(f):
     def wrap(self, *args, **kwargs):
@@ -108,7 +110,6 @@
     
     def __init__(self,pf,data_style='ramses'):
         self.data_style = data_style
-        self.field_info = RAMSESFieldContainer()
         self.parameter_file = weakref.proxy(pf)
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
@@ -265,20 +266,6 @@
             g._setup_dx()
         self.max_level = self.grid_levels.max()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            if field in self.parameter_file.field_info: continue
-            mylog.info("Adding %s to list of fields", field)
-            cf = None
-            if self.parameter_file.has_key(field):
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-            add_field(field, lambda a, b: None,
-                      convert_function=cf, take_log=False)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
@@ -287,7 +274,8 @@
 
 class RAMSESStaticOutput(StaticOutput):
     _hierarchy_class = RAMSESHierarchy
-    _fieldinfo_class = RAMSESFieldContainer
+    _fieldinfo_fallback = RAMSESFieldInfo
+    _fieldinfo_known = KnownRAMSESFields
     _handle = None
     
     def __init__(self, filename, data_style='ramses',
@@ -297,8 +285,6 @@
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
 
-        self.field_info = self._fieldinfo_class()
-
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
         


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/ramses/fields.py
--- a/yt/frontends/ramses/fields.py
+++ b/yt/frontends/ramses/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,13 +33,12 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class RAMSESFieldContainer(CodeFieldInfoContainer):
-    _shared_state = {}
-    _field_list = {}
-RAMSESFieldInfo = RAMSESFieldContainer()
-add_ramses_field = RAMSESFieldInfo.add_field
 
-add_field = add_ramses_field
+KnownRAMSESFields = FieldInfoContainer()
+add_ramses_field = KnownRAMSESFields.add_field
+
+RAMSESFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = RAMSESFieldInfo.add_field
 
 known_ramses_fields = [
     "Density",


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -6,6 +6,7 @@
     config = Configuration('frontends',parent_package,top_path)
     config.make_config_py() # installs __config__.py
     #config.make_svn_version_py()
+    config.add_subpackage("gdf")
     config.add_subpackage("chombo")
     config.add_subpackage("enzo")
     config.add_subpackage("flash")


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -36,12 +36,15 @@
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
 from yt.utilities.amr_utils import \
     get_box_grids_level
 
 from .fields import \
     StreamFieldContainer, \
-    add_stream_field
+    add_stream_field, \
+    KnownStreamFields
 
 class StreamGrid(AMRGridPatch):
     """
@@ -244,6 +247,7 @@
 class StreamStaticOutput(StaticOutput):
     _hierarchy_class = StreamHierarchy
     _fieldinfo_class = StreamFieldContainer
+    _fieldinfo_known = KnownStreamFields
     _data_style = 'stream'
 
     def __init__(self, stream_handler):
@@ -255,7 +259,6 @@
         self.stream_handler = stream_handler
         StaticOutput.__init__(self, "InMemoryParameterFile", self._data_style)
 
-        self.field_info = self._fieldinfo_class()
         self.units = {}
         self.time_units = {}
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/tiger/api.py
--- a/yt/frontends/tiger/api.py
+++ b/yt/frontends/tiger/api.py
@@ -34,7 +34,6 @@
       TigerStaticOutput
 
 from .fields import \
-      TigerFieldContainer, \
       TigerFieldInfo, \
       add_tiger_field
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/tiger/data_structures.py
--- a/yt/frontends/tiger/data_structures.py
+++ b/yt/frontends/tiger/data_structures.py
@@ -31,7 +31,9 @@
 from yt.data_objects.static_output import \
            StaticOutput
 
-from .fields import TigerFieldContainer
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import TigerFieldInfo, KnownTigerFields
 
 class TigerGrid(AMRGridPatch):
     _id_offset = 0
@@ -126,16 +128,13 @@
     def field_list(self):
         return self.file_mapping.keys()
 
-    def _setup_unknown_fields(self):
-        for field in self.field_list:
-            add_tiger_field(field, lambda a, b: None)
-
     def _setup_derived_fields(self):
         self.derived_field_list = []
 
 class TigerStaticOutput(StaticOutput):
     _hierarchy_class = TigerHierarchy
-    _fieldinfo_class = TigerFieldContainer
+    _fieldinfo_fallback = TigerFieldInfo
+    _fieldinfo_known = KnownTigerFields
 
     def __init__(self, rhobname, root_size, max_grid_size=128,
                  data_style='tiger', storage_filename = None):
@@ -151,7 +150,8 @@
         if not iterable(max_grid_size): max_grid_size = (max_grid_size,) * 3
         self.max_grid_size = max_grid_size
 
-        self.field_info = self._fieldinfo_class()
+        self.field_info = FieldInfoContainer.create_with_fallback(
+                            self._fieldinfo_fallback)
 
         # We assume that we have basename + "rhob" and basename + "temp"
         # to get at our various parameters.


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/frontends/tiger/fields.py
--- a/yt/frontends/tiger/fields.py
+++ b/yt/frontends/tiger/fields.py
@@ -24,7 +24,8 @@
 """
 
 from yt.data_objects.field_info_container import \
-    CodeFieldInfoContainer, \
+    FieldInfoContainer, \
+    FieldInfo, \
     ValidateParameter, \
     ValidateDataField, \
     ValidateProperty, \
@@ -32,12 +33,9 @@
     ValidateGridType
 import yt.data_objects.universal_fields
 
-class TigerFieldContainer(CodeFieldInfoContainer):
-    """
-    This is a container for Tiger-specific fields.
-    """
-    _shared_state = {}
-    _field_list = {}
-TigerFieldInfo = TigerFieldContainer()
-add_tiger_field = TigerFieldInfo.add_field
+KnownTigerFields = FieldInfoContainer()
+add_tiger_field = KnownTigerFields.add_field
 
+TigerFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = TigerFieldInfo.add_field
+


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -124,14 +124,14 @@
     """
     Returning resident size in megabytes
     """
+    pid = os.getpid()
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return 0
-    pid = os.getpid()
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return 0.0
+        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs
@@ -195,6 +195,11 @@
         return func(*args, **kwargs)
     return check_parallel_rank
 
+def rootloginfo(*args):
+    from yt.config import ytcfg
+    if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: return
+    mylog.info(*args)
+
 def deprecate(func):
     """
     This decorator issues a deprecation warning.
@@ -250,16 +255,32 @@
     *num_up* refers to how many frames of the stack get stripped off, and
     defaults to 1 so that this function itself is stripped off.
     """
-    from IPython.Shell import IPShellEmbed
+
+    import IPython
+    if IPython.__version__.startswith("0.10"):
+       api_version = '0.10'
+    elif IPython.__version__.startswith("0.11"):
+       api_version = '0.11'
+
     stack = inspect.stack()
     frame = inspect.stack()[num_up]
     loc = frame[0].f_locals.copy()
     glo = frame[0].f_globals
     dd = dict(fname = frame[3], filename = frame[1],
               lineno = frame[2])
-    ipshell = IPShellEmbed()
-    ipshell(header = __header % dd,
-            local_ns = loc, global_ns = glo)
+    if api_version == '0.10':
+        ipshell = IPython.Shell.IPShellEmbed()
+        ipshell(header = __header % dd,
+                local_ns = loc, global_ns = glo)
+    else:
+        from IPython.config.loader import Config
+        cfg = Config()
+        cfg.InteractiveShellEmbed.local_ns = loc
+        cfg.InteractiveShellEmbed.global_ns = glo
+        IPython.embed(config=cfg, banner2 = __header % dd)
+        from IPython.frontend.terminal.embed import InteractiveShellEmbed
+        ipshell = InteractiveShellEmbed(config=cfg)
+
     del ipshell
 
 


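The new fallback in get_memory_usage covers platforms without /proc, such as OS X. In isolation (assuming a POSIX ps), 'ps -o rss=' reports the resident set size in kilobytes, hence the division by 1024 to get megabytes:

import os

pid = os.getpid()
rss_megs = float(os.popen("ps -o rss= -p %d" % pid).read()) / 1024
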
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/bottle_mods.py
--- a/yt/gui/reason/bottle_mods.py
+++ b/yt/gui/reason/bottle_mods.py
@@ -149,6 +149,7 @@
             print "WARNING: %s has no _route_prefix attribute.  Not notifying."
             continue
             w._route_prefix = token
+    repl._global_token = token
     repl.activate()
     repl.execution_thread.wait()
     print


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/extdirect_repl.py
--- a/yt/gui/reason/extdirect_repl.py
+++ b/yt/gui/reason/extdirect_repl.py
@@ -220,6 +220,7 @@
                               _resources = ("/resources/:path#.+#", "GET"),
                               _philogl = ("/philogl/:path#.+#", "GET"),
                               _js = ("/js/:path#.+#", "GET"),
+                              _leaflet = ("/leaflet/:path#.+#", "GET"),
                               _images = ("/images/:path#.+#", "GET"),
                               _theme = ("/theme/:path#.+#", "GET"),
                               _session_py = ("/session.py", "GET"),
@@ -340,6 +341,13 @@
             return
         return open(pp).read()
 
+    def _leaflet(self, path):
+        pp = os.path.join(local_dir, "html", "leaflet", path)
+        if not os.path.exists(pp):
+            response.status = 404
+            return
+        return open(pp).read()
+
     def _images(self, path):
         pp = os.path.join(local_dir, "html", "images", path)
         if not os.path.exists(pp):
@@ -515,6 +523,21 @@
                                          'widget_data_name': '_twidget_data'})
 
     @lockit
+    def create_mapview(self, widget_name):
+        # We want multiple maps simultaneously
+        uu = "/%s/%s" % (getattr(self, "_global_token", ""),
+                        str(uuid.uuid1()).replace("-","_"))
+        from .pannable_map import PannableMapServer
+        data = self.locals[widget_name].data_source
+        field_name = self.locals[widget_name]._current_field
+        pm = PannableMapServer(data, field_name, route_prefix = uu)
+        self.locals['_tpm'] = pm
+        self.locals['_twidget_data'] = {'prefix': uu, 'field':field_name}
+        self.execution_thread.queue.put({'type': 'add_widget',
+                                         'name': '_tpm',
+                                         'widget_data_name': '_twidget_data'})
+
+    @lockit
     def create_slice(self, pfname, center, axis, field, onmax):
         if not onmax: 
             center_string = \


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/html/index.html
--- a/yt/gui/reason/html/index.html
+++ b/yt/gui/reason/html/index.html
@@ -80,6 +80,10 @@
          In that case, it will default to whatever is in the family. -->
     <link rel="stylesheet" type="text/css" href="http://fonts.googleapis.com/css?family=Inconsolata">
 
+    <!-- LEAFLET STUFF -->
+    <script type="text/javascript" src="leaflet/leaflet.js"></script>
+    <link rel="stylesheet" href="leaflet/leaflet.css" />
+
     <!-- LIBS --><script type="text/javascript" src="resources/adapter/ext/ext-base.js"></script><script type="text/javascript" src="resources/ext-all.js"></script>
@@ -119,6 +123,9 @@
     <!-- THE GRID VIEWER FUNCTIONS --><script type="text/javascript" src="js/widget_isocontour.js"></script>
 
+    <!-- THE PANNABLE MAP FUNCTIONS -->
+    <script type="text/javascript" src="js/widget_pannablemap.js"></script>
+
     <script id="gv-shader-fs" type="x-shader/x-fragment">
     #ifdef GL_ES
     precision highp float;


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/html/js/widget_pannablemap.js
--- /dev/null
+++ b/yt/gui/reason/html/js/widget_pannablemap.js
@@ -0,0 +1,76 @@
+/**********************************************************************
+The Pannable Map Widget
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+***********************************************************************/
+
+var WidgetPannableMap = function(python_varname, widget_data) {
+    this.id = python_varname;
+    this.widget_data = widget_data;
+
+    viewport.get("center-panel").add(
+        {
+            xtype: 'panel',
+            id: "pm_" + this.id,
+            title: "Pannable Map",
+            iconCls: 'graph',
+            autoScroll: true,
+            layout:'absolute',
+            closable: true,
+            items: [ 
+                {
+                    xtype:'box',
+                    autoEl: {
+                        tag: 'div',
+                        id: "map_" + this.id,
+                        width: 512,
+                        height: 512,
+                    },
+                    x: 10,
+                    y: 10,
+                    width: 512,
+                    height: 512,
+                    listeners: {afterrender:
+                        function() {
+                          var map = new L.Map('map_' + python_varname, {
+                                  center: new L.LatLng(0.0, 0.0),
+                                  zoom: 0,
+                                  });
+                          var cloudmadeUrl = widget_data['prefix'] + '/map/{z}/{x}/{y}.png';
+                          cloudmade = new L.TileLayer(cloudmadeUrl, {maxZoom: 18});
+                          map.addLayer(cloudmade);
+                    }},
+                }  
+            ]
+        }
+    );
+
+    viewport.get("center-panel").activate("pm_" + this.id);
+    viewport.doLayout();
+    this.panel = viewport.get("center-panel").get("pm_" + this.id);
+    this.panel.doLayout();
+    examine = this.panel;
+
+    this.accept_results = function(payload) { }
+}
+
+widget_types['pannable_map'] = WidgetPannableMap;


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -331,6 +331,21 @@
                         }); 
                     }
                 },{
+                    xtype: 'button',
+                    text: 'Pannable Map',
+                    x: 10,
+                    y: 335,
+                    width: 80,
+                    tooltip: "Open a pannable map in a new tab",
+                    handler: function(b,e) {
+                        img_data = image_dom.src;
+                        yt_rpc.ExtDirectREPL.create_mapview(
+                            {widget_name:python_varname},
+                        function(rv) {
+                            /*alert(rv);*/
+                        }); 
+                    }
+                },{
                     xtype: 'panel',
                     layout: 'vbox',
                     id: 'rhs_panel_' + python_varname,


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -47,14 +47,16 @@
     return func
 
 class PannableMapServer(object):
-    def __init__(self, data, field):
+    _widget_name = "pannable_map"
+    def __init__(self, data, field, route_prefix = ""):
         self.data = data
         self.pf = data.pf
         self.field = field
-        bottle.route("/map/:L/:x/:y.png")(self.map)
-        bottle.route("/")(self.index)
-        bottle.route("/index.html")(self.index)
-        bottle.route("/static/:filename#.+#")(self.static)
+        
+        bottle.route("%s/map/:L/:x/:y.png" % route_prefix)(self.map)
+        bottle.route("%s/" % route_prefix)(self.index)
+        bottle.route("%s/index.html" % route_prefix)(self.index)
+        bottle.route("%s/static/:filename#.+#" % route_prefix)(self.static)
         # This is a double-check, since we do not always mandate this for
         # slices:
         self.data[self.field] = self.data[self.field].astype("float64")


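Prefixing every route is what lets several PannableMapServer instances coexist in one bottle process, each mounted under its widget's token-plus-UUID path. A minimal standalone sketch (the prefixes and handler here are made up for illustration):

import bottle

def make_index(prefix):
    def index():
        return "map widget at %s" % prefix
    return index

for prefix in ("/token/widget_1", "/token/widget_2"):
    bottle.route("%s/index.html" % prefix)(make_index(prefix))
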
diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -38,9 +38,15 @@
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
 from yt.utilities.performance_counters import yt_counters, time_function
-from yt.config import ytcfg
+from yt.config import ytcfg, ytcfgDefaults
 import yt.utilities.physical_constants as physical_constants
 
+from yt.utilities.logger import level as __level
+if __level >= int(ytcfgDefaults["loglevel"]):
+    # This won't get displayed.
+    mylog.debug("Turning off NumPy error reporting")
+    na.seterr(all = 'ignore')
+
 from yt.data_objects.api import \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     data_object_registry, \
@@ -60,7 +66,7 @@
     CastroStaticOutput, CastroFieldInfo, add_castro_field
 
 from yt.frontends.nyx.api import \
-    NyxStaticOutput, nyx_fields, add_nyx_field
+    NyxStaticOutput, NyxFieldInfo, add_nyx_field
 
 from yt.frontends.orion.api import \
     OrionStaticOutput, OrionFieldInfo, add_orion_field
@@ -77,6 +83,9 @@
 from yt.frontends.chombo.api import \
     ChomboStaticOutput, ChomboFieldInfo, add_chombo_field
 
+from yt.frontends.gdf.api import \
+    GDFStaticOutput, GDFFieldInfo, add_gdf_field
+
 from yt.frontends.art.api import \
     ARTStaticOutput, ARTFieldInfo, add_art_field
 
@@ -105,11 +114,15 @@
     ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
     HomogenizedVolume, Camera, off_axis_projection
 
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects
+
 for name, cls in callback_registry.items():
     exec("%s = cls" % name)
 
 from yt.convenience import all_pfs, max_spheres, load, projload
 
+
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
 # that get used every time that they don't have to *define* every time.


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/_amr_utils/PointsInVolume.pyx
--- a/yt/utilities/_amr_utils/PointsInVolume.pyx
+++ b/yt/utilities/_amr_utils/PointsInVolume.pyx
@@ -215,3 +215,38 @@
                 break
     return good
 
+def calculate_fill_grids(int fill_level, int refratio, int last_level,
+                         np.ndarray[np.int64_t, ndim=1] domain_width,
+                         np.ndarray[np.int64_t, ndim=1] cg_start_index,
+                         np.ndarray[np.int32_t, ndim=1] cg_dims,
+                         np.ndarray[np.int64_t, ndim=1] g_start_index,
+                         np.ndarray[np.int32_t, ndim=1] g_dims,
+                         np.ndarray[np.int32_t, ndim=3] g_child_mask):
+    cdef np.int64_t cgstart[3], gstart[3]
+    cdef np.int64_t cgend[3], gend[3]
+    cdef np.int64_t dw[3]
+    cdef np.int64_t cxi, cyi, czi, gxi, gyi, gzi, ci, cj, ck
+    cdef int i, total
+    total = 0
+    for i in range(3):
+        dw[i] = domain_width[i]
+        cgstart[i] = cg_start_index[i]
+        gstart[i] = g_start_index[i]
+        cgend[i] = cgstart[i] + cg_dims[i]
+        gend[i] = gstart[i] + g_dims[i]
+    for cxi in range(cgstart[0], cgend[0]+1):
+        ci = (cxi % dw[0])
+        if ci < 0: ci += dw[0]
+        if ci < gstart[0]*refratio or ci >= gend[0]*refratio: continue
+        gxi = (<np.int64_t> (ci / refratio)) - gstart[0]
+        for cyi in range(cgstart[1], cgend[1]):
+            cj = (cyi % dw[1])
+            if cj < 0: cj += dw[1]
+            if cj < gstart[1]*refratio or cj >= gend[1]*refratio: continue
+            gyi = (<np.int64_t> (cj / refratio)) - gstart[1]
+            for czi in range(cgstart[2], cgend[2]):
+                ck = (czi % dw[2])
+                if ck < 0: ck += dw[2]
+                if ck < gstart[2]*refratio or cj >= gend[2]*refratio: continue
+                gzi = (<np.int64_t> (ck / refratio)) - gstart[2]
+                if last_level or g_child_mask[gxi, gyi, gzi] > 0: total += 1
+    return total


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/_amr_utils/VolumeIntegrator.pyx
--- a/yt/utilities/_amr_utils/VolumeIntegrator.pyx
+++ b/yt/utilities/_amr_utils/VolumeIntegrator.pyx
@@ -357,14 +357,14 @@
                                          tf_obj.tables[i].y))
             self.field_tables[i].field_id = tf_obj.field_ids[i]
             self.field_tables[i].weight_field_id = tf_obj.weight_field_ids[i]
-            print "Field table", i, "corresponds to",
-            print self.field_tables[i].field_id,
-            print "(Weighted with ", self.field_tables[i].weight_field_id,
-            print ")"
+            #print "Field table", i, "corresponds to",
+            #print self.field_tables[i].field_id,
+            #print "(Weighted with ", self.field_tables[i].weight_field_id,
+            #print ")"
 
         for i in range(6):
             self.field_table_ids[i] = tf_obj.field_table_ids[i]
-            print "Channel", i, "corresponds to", self.field_table_ids[i]
+            #print "Channel", i, "corresponds to", self.field_table_ids[i]
             
     @cython.boundscheck(False)
     @cython.wraparound(False)


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/_amr_utils/fortran_reader.pyx
--- a/yt/utilities/_amr_utils/fortran_reader.pyx
+++ b/yt/utilities/_amr_utils/fortran_reader.pyx
@@ -28,6 +28,7 @@
 cimport cython
 
 from stdio cimport fopen, fclose, FILE
+cimport libc.stdlib as stdlib
 
 #cdef inline int imax(int i0, int i1):
     #if i0 > i1: return i0
@@ -48,6 +49,21 @@
     int fseek(FILE *stream, long offset, int whence)
     size_t fread(void *ptr, size_t size, size_t nmemb, FILE *stream)
     long ftell(FILE *stream)
+    char *fgets(char *s, int size, FILE *stream)
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def read_and_seek(char *filename, int offset1, int offset2,
+                  np.ndarray buffer, int bytes):
+    # Skip offset1 bytes, consume one text line, skip offset2 more
+    # bytes, then read 'bytes' bytes directly into the array's memory.
+    cdef FILE *f = fopen(filename, "rb")
+    cdef void *buf = <void *> buffer.data
+    cdef char line[1024]
+    cdef size_t n = 1023
+    fseek(f, offset1, SEEK_SET)
+    fgets(line, n, f)
+    fseek(f, offset2, SEEK_CUR)
+    fread(buf, 1, bytes, f)
+    fclose(f)
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
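
A minimal usage sketch for read_and_seek, with a hypothetical file name
and offsets; the numpy array supplies the raw memory that fread fills:

    >>> import numpy as np
    >>> arr = np.empty(1000, dtype='float32')
    >>> # skip 128 bytes, consume one header line, skip 16 more bytes,
    >>> # then read arr.nbytes bytes straight into arr's memory
    >>> read_and_seek("amr_data.bin", 128, 16, arr, arr.nbytes)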


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -148,6 +148,49 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def obtain_rvec(data):
+    # This is just to let the pointers exist and whatnot.  We can't cdef them
+    # inside conditionals.
+    cdef np.ndarray[np.float64_t, ndim=1] xf
+    cdef np.ndarray[np.float64_t, ndim=1] yf
+    cdef np.ndarray[np.float64_t, ndim=1] zf
+    cdef np.ndarray[np.float64_t, ndim=2] rf
+    cdef np.ndarray[np.float64_t, ndim=3] xg
+    cdef np.ndarray[np.float64_t, ndim=3] yg
+    cdef np.ndarray[np.float64_t, ndim=3] zg
+    cdef np.ndarray[np.float64_t, ndim=4] rg
+    cdef np.float64_t c[3]
+    cdef int i, j, k
+    center = data.get_field_parameter("center")
+    c[0] = center[0]; c[1] = center[1]; c[2] = center[2]
+    if len(data['x'].shape) == 1:
+        # One dimensional data
+        xf = data['x']
+        yf = data['y']
+        zf = data['z']
+        rf = np.empty((3, xf.shape[0]), 'float64')
+        for i in range(xf.shape[0]):
+            rf[0, i] = xf[i] - c[0]
+            rf[1, i] = yf[i] - c[1]
+            rf[2, i] = zf[i] - c[2]
+        return rf
+    else:
+        # Three dimensional data
+        xg = data['x']
+        yg = data['y']
+        zg = data['z']
+        rg = np.empty((3, xg.shape[0], xg.shape[1], xg.shape[2]), 'float64')
+        for i in range(xg.shape[0]):
+            for j in range(xg.shape[1]):
+                for k in range(xg.shape[2]):
+                    rg[0,i,j,k] = xg[i,j,k] - c[0]
+                    rg[1,i,j,k] = yg[i,j,k] - c[1]
+                    rg[2,i,j,k] = zg[i,j,k] - c[2]
+        return rg
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def kdtree_get_choices(np.ndarray[np.float64_t, ndim=3] data,
                        np.ndarray[np.float64_t, ndim=1] l_corner,
                        np.ndarray[np.float64_t, ndim=1] r_corner):
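
A minimal sketch of calling obtain_rvec, assuming a data object that
carries a "center" field parameter (the sphere and center below are
hypothetical):

    >>> sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    >>> sp.set_field_parameter("center", [0.5, 0.5, 0.5])
    >>> rv = obtain_rvec(sp)  # (3, N) for 1D input; (3, nx, ny, nz) for 3D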


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -466,14 +466,29 @@
             print "Could not load file."
             sys.exit()
         import yt.mods
-        from IPython.Shell import IPShellEmbed
+
+        import IPython
+        if IPython.__version__.startswith("0.10"):
+            api_version = '0.10'
+        else:
+            api_version = '0.11'
+
         local_ns = yt.mods.__dict__.copy()
         local_ns['pf'] = pf
-        shell = IPShellEmbed()
-        shell(local_ns = local_ns,
-              header =
-            "\nHi there!  Welcome to yt.\n\nWe've loaded your parameter file as 'pf'.  Enjoy!"
-             )
+
+        if api_version == '0.10':
+            shell = IPython.Shell.IPShellEmbed()
+            shell(local_ns = local_ns,
+                  header =
+                  "\nHi there!  Welcome to yt.\n\nWe've loaded your parameter file as 'pf'.  Enjoy!"
+                  )
+        else:
+            from IPython.config.loader import Config
+            cfg = Config()
+            cfg.InteractiveShellEmbed.local_ns = local_ns
+            IPython.embed(config=cfg)
 
     @add_cmd_options(['outputfn','bn','thresh','dm_only','skip'])
     @check_args
@@ -573,7 +588,7 @@
         else:
             p = pc.add_slice(opts.field, opts.axis)
         from yt.gui.reason.pannable_map import PannableMapServer
-        mapper = PannableMapServer(p.field_data, opts.field)
+        mapper = PannableMapServer(p.data, opts.field)
         import yt.utilities.bottle as bottle
         bottle.debug(True)
         if opts.host is not None:
@@ -1058,6 +1073,19 @@
             print
             loki = raw_input("Press enter to go on, Ctrl-C to exit.")
             cedit.config.setoption(uu, hgrc_path, "bb.username=%s" % bbusername)
+        bb_fp = "81:2b:08:90:dc:d3:71:ee:e0:7c:b4:75:ce:9b:6c:48:94:56:a1:fe"
+        if uu.config("hostfingerprints", "bitbucket.org", None) is None:
+            print "Let's also add bitbucket.org to the known hosts, so hg"
+            print "doesn't warn us about bitbucket."
+            print "We will add this:"
+            print
+            print "   [hostfingerprints]"
+            print "   bitbucket.org = %s" % (bb_fp)
+            print
+            loki = raw_input("Press enter to go on, Ctrl-C to exit.")
+            cedit.config.setoption(uu, hgrc_path,
+                                   "hostfingerprints.bitbucket.org=%s" % bb_fp)
+
         # We now reload the UI's config file so that it catches the [bb]
         # section changes.
         uu.readconfig(hgrc_path[0])
@@ -1570,7 +1598,7 @@
             save_name = "%s"%pf+"_"+field+"_rendering.png"
         if not '.png' in save_name:
             save_name += '.png'
-        if cam._par_rank != -1:
+        if cam.comm.rank != -1:
             write_bitmap(image,save_name)
         
 


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -937,6 +937,15 @@
 /* These functions are both called with
     func(cubedata, griddata) */
 
+static void dcNothing(PyArrayObject* c_data, npy_int64 xc, npy_int64 yc, npy_int64 zc,
+                     PyArrayObject* g_data, npy_int64 xg, npy_int64 yg, npy_int64 zg)
+{
+    return;
+}
+
+/* These functions are both called with
+    func(cubedata, griddata) */
+
 static void dcRefine(PyArrayObject* c_data, npy_int64 xc, npy_int64 yc, npy_int64 zc,
                      PyArrayObject* g_data, npy_int64 xg, npy_int64 yg, npy_int64 zg)
 {
@@ -1107,36 +1116,37 @@
     cdx = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 0));
     cdy = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 1));
     cdz = (*(npy_int32 *) PyArray_GETPTR1(oc_dims, 2));
-    cxe = (cxs + cdx - 1);
-    cye = (cys + cdy - 1);
-    cze = (czs + cdz - 1);
+    cxe = (cxs + cdx);
+    cye = (cys + cdy);
+    cze = (czs + cdz);
 
     /* It turns out that C89 doesn't define a mechanism for choosing the sign
        of the remainder.
     */
         //fprintf(stderr, "ci == %d, cxi == %d, dw[0] == %d\n", (int) ci, (int) cxi, (int) dw[0]);
-    for(cxi=cxs;cxi<=cxe;cxi++) {
+    for(cxi=cxs;cxi<cxe;cxi++) {
         ci = (cxi % dw[0]);
         ci = (ci < 0) ? ci + dw[0] : ci;
         if ( ci < gxs*refratio || ci >= gxe*refratio) continue;
         gxi = floor(ci / refratio) - gxs;
-        for(cyi=cys;cyi<=cye;cyi++) {
+        for(cyi=cys;cyi<cye;cyi++) {
             cj = cyi % dw[1];
             cj = (cj < 0) ? cj + dw[1] : cj;
             if ( cj < gys*refratio || cj >= gye*refratio) continue;
             gyi = floor(cj / refratio) - gys;
-            for(czi=czs;czi<=cze;czi++) {
+            for(czi=czs;czi<cze;czi++) {
                 ck = czi % dw[2];
                 ck = (ck < 0) ? ck + dw[2] : ck;
                 if ( ck < gzs*refratio || ck >= gze*refratio) continue;
                 gzi = floor(ck / refratio) - gzs;
                     if ((ll) || (*(npy_int32*)PyArray_GETPTR3(mask, gxi,gyi,gzi) > 0)) 
                 {
-                for(n=0;n<n_fields;n++){
-                    to_call(c_data[n],
-                        cxi - cxs, cyi - cys, czi - czs,
-                        g_data[n], gxi, gyi, gzi);
-                }
+                if (direction!=2)
+                  for(n=0;n<n_fields;n++){
+                      to_call(c_data[n],
+                          cxi - cxs, cyi - cys, czi - czs,
+                          g_data[n], gxi, gyi, gzi);
+                  }
                 total += 1;
                 }
             }
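
The C89 remainder caveat above is why both the C and the Cython loops
wrap indices in two steps. A minimal sketch of the pattern (in pure
Python the second step is never taken for a positive width, so it only
matters under C semantics, where -3 % 64 may be -3):

    def wrap(index, width):
        i = index % width   # may be negative under C89 / Cython cdivision
        if i < 0:
            i += width      # force the result into [0, width)
        return i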


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -49,7 +49,7 @@
     from mpi4py import MPI
     parallel_capable = (MPI.COMM_WORLD.size > 1)
     if parallel_capable:
-        mylog.info("Parallel computation enabled: %s / %s",
+        mylog.info("Global parallel computation enabled: %s / %s",
                    MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
         ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
         ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
@@ -61,8 +61,6 @@
         #ytcfg["yt","StoreParameterFiles"] = "False"
         # Now let's make sure we have the right options set.
         if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","serialize"):
-                ytcfg["yt","onlydeserialize"] = "True"
             if ytcfg.getboolean("yt","LogFile"):
                 ytcfg["yt","LogFile"] = "False"
                 yt.utilities.logger.disable_file_logging()
@@ -150,8 +148,10 @@
     def __init__(self, pobj, just_list = False, attr='_grids',
                  round_robin=False):
         ObjectIterator.__init__(self, pobj, just_list, attr=attr)
-        self._offset = MPI.COMM_WORLD.rank
-        self._skip = MPI.COMM_WORLD.size
+        # pobj has to be a ParallelAnalysisInterface, so it must have a .comm
+        # object.
+        self._offset = pobj.comm.rank
+        self._skip = pobj.comm.size
         # Note that we're doing this in advance, and with a simple means
         # of choosing them; more advanced methods will be explored later.
         if self._use_all:
@@ -182,11 +182,15 @@
         retval = None
         if self._processing or not self._distributed:
             return func(self, *args, **kwargs)
-        if self._owner == MPI.COMM_WORLD.rank:
+        comm = _get_comm((self,))
+        if self._owner == comm.rank:
             self._processing = True
             retval = func(self, *args, **kwargs)
             self._processing = False
-        retval = MPI.COMM_WORLD.bcast(retval, root=self._owner)
+        # To be sure we utilize the root= kwarg, we manually access the .comm
+        # attribute, which must be an instance of MPI.Intracomm, and call bcast
+        # on that.
+        retval = comm.comm.bcast(retval, root=self._owner)
         #MPI.COMM_WORLD.Barrier()
         return retval
     return single_proc_results
@@ -220,6 +224,13 @@
         return func(self, data, **kwargs)
     return passage
 
+def _get_comm(args):
+    if len(args) > 0 and hasattr(args[0], "comm"):
+        comm = args[0].comm
+    else:
+        comm = communication_system.communicators[-1]
+    return comm
+
 def parallel_blocking_call(func):
     """
     This decorator blocks on entry and exit of a function.
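
A hypothetical illustration of what the _get_comm change means for
decorated methods: barriers now run on the object's own communicator
rather than COMM_WORLD, so subgroup work does not block the whole job.

    class MyAnalysis(ParallelAnalysisInterface):
        @parallel_blocking_call
        def write_out(self, filename):
            # entry/exit barriers use self.comm, found via _get_comm
            open(filename, "a").close()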
@@ -227,10 +238,11 @@
     @wraps(func)
     def barrierize(*args, **kwargs):
         mylog.debug("Entering barrier before %s", func.func_name)
-        MPI.COMM_WORLD.Barrier()
+        comm = _get_comm(args)
+        comm.barrier()
         retval = func(*args, **kwargs)
         mylog.debug("Entering barrier after %s", func.func_name)
-        MPI.COMM_WORLD.Barrier()
+        comm.barrier()
         return retval
     if parallel_capable:
         return barrierize
@@ -244,10 +256,11 @@
     """
     @wraps(f1)
     def in_order(*args, **kwargs):
-        if MPI.COMM_WORLD.rank == 0:
+        comm = _get_comm(args)
+        if comm.rank == 0:
             f1(*args, **kwargs)
-        MPI.COMM_WORLD.Barrier()
-        if MPI.COMM_WORLD.rank != 0:
+        comm.barrier()
+        if comm.rank != 0:
             f2(*args, **kwargs)
     if not parallel_capable: return f1
     return in_order
@@ -259,7 +272,8 @@
     """
     @wraps(func)
     def root_only(*args, **kwargs):
-        if MPI.COMM_WORLD.rank == 0:
+        comm = _get_comm(args)
+        if comm.rank == 0:
             try:
                 func(*args, **kwargs)
                 all_clear = 1
@@ -268,8 +282,7 @@
                 all_clear = 0
         else:
             all_clear = None
-        #MPI.COMM_WORLD.Barrier()
-        all_clear = MPI.COMM_WORLD.bcast(all_clear, root=0)
+        all_clear = comm.mpi_bcast_pickled(all_clear)
         if not all_clear: raise RuntimeError
     if parallel_capable: return root_only
     return func
@@ -331,16 +344,23 @@
     result_id = None
 
 def parallel_objects(objects, njobs, storage = None):
-    if not parallel_capable: raise RuntimeError
+    if not parallel_capable:
+        njobs = 1
+        mylog.warn("parallel_objects() is being used when parallel_capable is false. The loop is not being run in parallel. This may not be what was expected.")
     my_communicator = communication_system.communicators[-1]
     my_size = my_communicator.size
+    if njobs > my_size:
+        mylog.error("You have asked for %s jobs, but you only have %s processors.",
+            njobs, my_size)
+        raise RuntimeError
     my_rank = my_communicator.rank
     all_new_comms = na.array_split(na.arange(my_size), njobs)
     for i,comm_set in enumerate(all_new_comms):
         if my_rank in comm_set:
             my_new_id = i
             break
-    communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
+    if parallel_capable:
+        communication_system.push_with_ids(all_new_comms[my_new_id].tolist())
     obj_ids = na.arange(len(objects))
 
     to_share = {}
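
A minimal usage sketch for parallel_objects, with hypothetical dataset
names: with njobs=2 on four ranks, each pair of ranks forms a subgroup
and takes half of the objects, while a serial run now degrades to
njobs=1 instead of raising:

    >>> fns = ["DD0010/DD0010", "DD0020/DD0020",
    ...        "DD0030/DD0030", "DD0040/DD0040"]
    >>> for fn in parallel_objects(fns, njobs=2):
    ...     pf = load(fn)
    ...     print pf.h.find_max("Density")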
@@ -367,31 +387,29 @@
             self.communicators.append(Communicator(MPI.COMM_WORLD))
         else:
             self.communicators.append(Communicator(None))
-    def push(self, size=None, ranks=None):
-        raise NotImplementedError
-        if size is None:
-            size = len(available_ranks)
-        if len(available_ranks) < size:
-            raise RuntimeError
-        if ranks is None:
-            ranks = [available_ranks.pop() for i in range(size)]
-        
-        group = MPI.COMM_WORLD.Group.Incl(ranks)
-        new_comm = MPI.COMM_WORLD.Create(group)
-        self.communicators.append(Communicator(new_comm))
-        return new_comm
+
+    def push(self, new_comm):
+        if not isinstance(new_comm, Communicator):
+            new_comm = Communicator(new_comm)
+        self.communicators.append(new_comm)
+        self._update_parallel_state(new_comm)
 
     def push_with_ids(self, ids):
         group = self.communicators[-1].comm.Get_group().Incl(ids)
         new_comm = self.communicators[-1].comm.Create(group)
+        self.push(new_comm)
+        return new_comm
+
+    def _update_parallel_state(self, new_comm):
         from yt.config import ytcfg
         ytcfg["yt","__topcomm_parallel_size"] = str(new_comm.size)
         ytcfg["yt","__topcomm_parallel_rank"] = str(new_comm.rank)
-        self.communicators.append(Communicator(new_comm))
-        return new_comm
+        if MPI.COMM_WORLD.rank > 0 and ytcfg.getboolean("yt","serialize"):
+            ytcfg["yt","onlydeserialize"] = "True"
 
     def pop(self):
         self.communicators.pop()
+        self._update_parallel_state(self.communicators[-1])
 
 class Communicator(object):
     comm = None
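
A minimal sketch of the stack discipline, assuming a four-rank job in
which every rank makes the collective call: push_with_ids carves a
subgroup off the current top communicator, and pop restores the
previous parallel state via _update_parallel_state.

    >>> cs = communication_system
    >>> cs.push_with_ids([0, 1])   # ranks 0 and 1 form a 2-rank topcomm
    >>> # ... subgroup work happens here ...
    >>> cs.pop()                   # previous communicator becomes current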
@@ -495,12 +513,12 @@
             data = self.alltoallv_array(data, arr_size, offsets, sizes)
             return data
         elif datatype == "list" and op == "cat":
-            if self.comm.rank == 0:
-                data = self.__mpi_recvlist(data)
-            else:
-                self.comm.send(data, dest=0, tag=0)
-            mylog.debug("Opening MPI Broadcast on %s", self.comm.rank)
-            data = self.comm.bcast(data, root=0)
+            recv_data = self.comm.allgather(data)
+            # Now flatten into a single list, since this 
+            # returns us a list of lists.
+            data = []
+            while recv_data:
+                data.extend(recv_data.pop(0))
             return data
         raise NotImplementedError
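
The list/cat branch above swaps a root-gather-plus-broadcast for a
single allgather; popping while extending lets each per-rank chunk be
freed as it is merged. The same flatten, sketched with hypothetical
two-rank data:

    >>> recv_data = [["a", "b"], ["c"]]   # as returned by allgather
    >>> data = []
    >>> while recv_data:
    ...     data.extend(recv_data.pop(0))
    >>> data
    ['a', 'b', 'c']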
 
@@ -775,6 +793,8 @@
         deps = []
         fi = self.pf.field_info
         for field in fields:
+            if any(getattr(v,"ghost_zones", 0) > 0 for v in
+                   fi[field].validators): continue
             deps += ensure_list(fi[field].get_dependencies(pf=self.pf).requested)
         return list(set(deps))
 




diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -66,6 +66,10 @@
     server.server_close()
     if size > 1:
         from mpi4py import MPI
+        # This COMM_WORLD is okay.  We want to barrierize here, while waiting
+        # for shutdown from the rest of the parallel group.  If you are running
+        # with --rpdb it is assumed you know what you are doing and you won't
+        # let this get out of hand.
         MPI.COMM_WORLD.Barrier()
 
 class pdb_handler(object):


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/color_maps.py
--- a/yt/visualization/color_maps.py
+++ b/yt/visualization/color_maps.py
@@ -34,15 +34,15 @@
     except ValueError:
         return False
 
-raven_colormaps = {}
+yt_colormaps = {}
 
 def add_cmap(name, cdict):
-    raven_colormaps[name] = \
+    yt_colormaps[name] = \
         cc.LinearSegmentedColormap(name,cdict,256)
     mcm.datad[name] = cdict
     mcm.__dict__[name] = cdict
     try: # API compatibility
-        mcm.register_cmap(name, raven_colormaps[name])
+        mcm.register_cmap(name, yt_colormaps[name])
     except AttributeError:
         pass
     
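
A minimal sketch of registering a colormap through add_cmap (the name
and cdict below are hypothetical); the map becomes visible both in
yt_colormaps and in matplotlib's registry:

    >>> cdict = {'red':   [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
    ...          'green': [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
    ...          'blue':  [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    >>> add_cmap("gray_example", cdict)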


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -262,176 +262,6 @@
     def set_limits(self, xlim, ylim):
         for w in self.windows: w.set_limits(xlim, ylim)
 
-class RemoteWindowedVariableMeshController(MultipleWindowVariableMeshPanner):
-    def __init__(self, source, mec = None):
-        """
-        This panner controls remote windowed panners.  It requires a *source*,
-        which will be pickled and sent to the remote panners, which it will
-        create as requested.  If not supplied with a *mec* (an IPython
-        MultiEngineClient) it will create one itself.
-        """
-        if mec is None:
-            from IPython.kernel.client import get_multiengine_client
-            mec = get_multiengine_client()
-        self.mec = mec
-        self.mec.execute("import yt.extensions.image_panner")
-        self._var_name = "_image_panner_%s" % (id(self))
-        self._pf_name = "_pf_%s" % (id(self))
-        self._source_name = "_source_%s" % (id(self))
-        self.source = source
-        self.mec.execute("from yt.mods import *")
-        self.mec.execute("from yt.funcs import iterable")
-        self.mec.push({self._pf_name: self.pf})
-        self.mec.execute("%s.h" % self._pf_name)
-        self.mec.push({self._source_name: self.source})
-        # Now, because the double pickling tosses a PF hash reference inside
-        # the unpickled object, we work around it a little
-        self.mec.execute("while iterable(%s): %s = %s[1]" % (
-            self._source_name, self._source_name, self._source_name))
-        self.windows = []
-
-    def add_window(self, *args, **kwargs):
-        """
-        This will create a new remote WindowedVariableMeshImagePanner.  The
-        *args* and *kwargs* supplied here will be passed over, but the *source*
-        argument is implicitly handled by this routine.
-        """
-        engine_id = len(self.windows)
-        an = "_args_%s" % id(self)
-        kn = "_kwargs_%s" % id(self)
-        if 'callback' not in kwargs:
-            kwargs['callback'] = ImageSaver(engine_id)
-        self.mec.push({an: args, kn: kwargs}, engine_id)
-        exec_string = "%s = %s.h.windowed_image_panner(%s, *%s, **%s)" % (
-            self._var_name, self._pf_name, self._source_name, an, kn)
-        self.mec.execute(exec_string, engine_id)
-        self.windows.append(WindowedVariableMeshPannerProxy(
-            self.mec, engine_id, self._var_name, id(self)))
-
-data_object_registry["remote_image_panner"] = RemoteWindowedVariableMeshController
-
-_wrapped_methods = ["zoom", "pan", "pan_x", "pan_y", "pan_rel",
-                     "pan_rel_x", "pan_rel_y", "set_limits"]
-
-class WindowedVariableMeshPannerProxy(object):
-    class __metaclass__(type):
-        def __new__(cls, name, b, d):
-            # We add on a bunch of proxy functions
-            def return_proxy(fname):
-                def func(self, *args, **kwargs):
-                    vn = "_ret_%s" % self._cid
-                    an = "_args_%s" % self._cid
-                    kn = "_kwargs_%s" % self._cid
-                    self.mec.push({an: args, kn: kwargs}, self.engine_id)
-                    exec_string = "%s = %s.%s(*%s, **%s)" % (
-                        vn, self._var_name, fname, an, kn)
-                    print "Executing %s on %s" % (exec_string, self.engine_id)
-                    self.mec.execute(exec_string, self.engine_id)
-                    return self.mec.pull(vn, self.engine_id)
-                return func
-            new_dict = {}
-            new_dict.update(d)
-            for f in _wrapped_methods:
-                new_dict[f] = return_proxy(f)
-            return type.__new__(cls, name, b, new_dict)
-
-    def __init__(self, mec, engine_id, var_name, cid):
-        # mec here is, optionally, an instance of MultiEngineClient
-        self._var_name = var_name
-        self._cid = cid
-        self.engine_id = engine_id
-        self.mec = mec
-
-    @property
-    def bounds(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.bounds" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    @property
-    def width(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.width" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    @property
-    def buffer(self):
-        vn = "_ret_%s" % self._cid
-        self.mec.execute("%s = %s.buffer" % (vn, self._var_name),
-                         self.engine_id)
-        return self.mec.pull(vn, self.engine_id)
-
-    def _regenerate_buffer(self):
-        return
-
-    def _run_callback(self):
-        self.mec.execute("%s._regenerate_buffer()" % self._var_name,
-                         self.engine_id)
-        self.mec.execute("%s.callback(%s.buffer)" % (
-            self._var_name, self._var_name), self.engine_id)
-
-class ProxySource(object):
-    # This proxies only the things we know we need
-    # Note that we assume we will only have a single engine.
-    def __init__(self, mec, idnum, source_varname):
-        self.mec = mec
-        self.idnum = idnum
-        self.source_varname = source_varname
-        self.mec.execute("_tmp_%s = %s.axis" % (
-            self.idnum, self.source_varname))
-        self.axis = self.mec.pull("_tmp_%s" % self.idnum)[0]
-
-    def keys(self):
-        self.mec.execute("_tmp_%s = %s.keys()" % (
-            self.idnum, self.source_varname))
-        keys = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        dd = dict( (k, None) for k in keys )
-        return dd
-
-    @property
-    def pf(self):
-        self.mec.execute("_tmp_%s = %s.pf.domain_left_edge" % (
-            self.idnum, self.source_varname))
-        DLE = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        self.mec.execute("_tmp_%s = %s.pf.domain_right_edge" % (
-            self.idnum, self.source_varname))
-        DRE = self.mec.pull("_tmp_%s" % self.idnum)[0]
-        return dict(DomainLeftEdge = DLE, DomainRightEdge = DRE)
-
-class ProxyFixedResolutionBuffer(dict):
-    pass
-
-class NonLocalDataImagePanner(VariableMeshPanner):
-    def __init__(self, mec, source_varname, size, field,
-                 callback = None, viewport_callback = None):
-        self.source_varname = source_varname
-        self._var_name = "_image_panner_%s" % (id(self))
-        self.mec = mec
-        self.mec.execute("import yt.extensions.image_panner")
-        self.mec.execute("%s = yt.extensions.image_panner.VariableMeshPanner(" % (
-                        self._var_name) +
-                          "%s, (%s, %s), '%s')" % (
-                        source_varname, size[0], size[1], field))
-
-        ps = ProxySource(mec, id(self), source_varname)
-        self._prfb = ProxyFixedResolutionBuffer()
-
-        VariableMeshPanner.__init__(self, ps, size, field,
-                        callback, viewport_callback)
-
-    def _regenerate_buffer(self):
-        args = (self.xlim, self.ylim)
-        self.mec.push({'_tmp_%s' % id(self) : args}, block=False)
-        self.mec.execute("%s.set_limits(*_tmp_%s)" % (self._var_name, id(self)),
-                         block=False)
-        self.mec.execute("_tmp_%s = %s.buffer" % (id(self), self._var_name),
-                         block=False)
-        self._prfb[self.field] = self.mec.pull("_tmp_%s" % (id(self)))[0]
-        self._prfb.bounds = self.xlim + self.ylim
-        self._buffer = self._prfb
-
 class ImageSaver(object):
     def __init__(self, tile_id):
         """
@@ -479,43 +309,3 @@
         tf.close()
         self.transport.append(response_body)
 
-class PanningCeleritasStreamer(object):
-    _initialized = False
-    def __init__(self, tile_id, cmap = "algae", port = 9988,
-                 zlim = (0.0, 1.0), take_log = True):
-        """
-        This is an in-development mechanism for supplying buffers to a
-        Celeritas server.
-        """
-        self.tile_id = tile_id
-        self._port = port
-        self.cmap = cmap
-        self.zlim = zlim
-        self.take_log = True
-
-    def initialize(self, shape):
-        if isinstance(self.cmap, types.StringTypes):
-            import matplotlib.cm
-            self.cmap = matplotlib.cm.get_cmap(self.cmap)
-
-        import celeritas_streamer
-        self.cs = celeritas_streamer.CeleritasStream()
-        #print "Setting shape: %s and port: %s in %s" % (
-        #    shape, self._port, os.getpid())
-        self.cs.setSize(*shape)
-        self.cs.setLocalPort(self._port)
-        self.cs.initialize()
-        self._initialized = True
-
-    def __call__(self, val):
-        if not self._initialized: self.initialize(val.shape)
-        if self.take_log:
-            vv = na.log10(val)
-        else:
-            vv = val.copy()
-        na.subtract(vv, self.zlim[0], vv)
-        na.divide(vv, (self.zlim[1]-self.zlim[0]), vv)
-        new_buf = self.cmap(vv)[:,:,:3]
-        na.multiply(new_buf, 255.0, new_buf)
-        new_buf = new_buf.astype('uint8')
-        self.cs.readFromRGBMemAndSend(new_buf)


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/plot_collection.py
--- a/yt/visualization/plot_collection.py
+++ b/yt/visualization/plot_collection.py
@@ -32,7 +32,8 @@
 from yt.data_objects.profiles import \
     BinnedProfile1D, \
     BinnedProfile2D
-from yt.utilities.definitions import axis_names, inv_axis_names
+from yt.utilities.definitions import \
+    axis_names, inv_axis_names, x_dict, y_dict
 from .plot_types import \
     FixedResolutionPlot, \
     SlicePlot, \
@@ -1064,7 +1065,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted
@@ -1188,7 +1189,7 @@
             The center to be used for things like radius and radial velocity.
             Defaults to the center of the plot collection.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted
@@ -1790,3 +1791,75 @@
             ax.clear()
             cbars.append(ax)
     return fig, tr, cbars
+
+def _MPLFixImage(data_source, image_obj, field, cbar, cls):
+    nx, ny = image_obj.get_size()
+    def f(axes):
+        x0, x1 = axes.get_xlim()
+        y0, y1 = axes.get_ylim()
+        frb = cls(data_source, (x0, x1, y0, y1), (nx, ny))
+        image_obj.set_data(frb[field])
+        mi, ma = frb[field].min(), frb[field].max()
+        cbar.norm.autoscale((mi, ma))
+        image_obj.set_extent([x0, x1, y0, y1])
+        cbar.update_bruteforce(image_obj)
+    return f
+
+def matplotlib_widget(data_source, field, npix):
+    r"""Create a widget from a data_source that uses the Matplotlib interaction
+    method to pan, zoom, and so on.
+
+    This is a simple way to take a yt data source, for instance a projection or
+    a slice, and to create a matplotlib view into it that you can pan and zoom.
+    It uses the matplotlib interaction engine to manage input and display.
+
+    Parameters
+    ----------
+    data_source : :class:`yt.data_objects.data_containers.AMRProjBase` or :class:`yt.data_objects.data_containers.AMRSliceBase`
+        This is the source to be pixelized, which can be a projection or a
+        slice.  
+    field : string
+        The field that you want to display in the window.
+    npix : int
+        The number of pixels on a side you want the image to be.
+
+    Examples
+    --------
+
+    >>> pf = load("DD0030/DD0030")
+    >>> p = pf.h.proj(0, "Density")
+    >>> matplotlib_widget(p, "Density", 1024)
+
+    """
+    import pylab
+    import matplotlib.colors
+    from .fixed_resolution import FixedResolutionBuffer, \
+            ObliqueFixedResolutionBuffer
+    pf = data_source.pf
+    if getattr(data_source, "axis", 4) < 3:
+        cls = FixedResolutionBuffer
+        ax = data_source.axis
+        extent = [pf.domain_left_edge[x_dict[ax]],
+                  pf.domain_right_edge[x_dict[ax]],
+                  pf.domain_left_edge[y_dict[ax]],
+                  pf.domain_right_edge[y_dict[ax]]]
+    else:
+        cls = ObliqueFixedResolutionBuffer
+        extent = [0.0, 1.0, 0.0, 1.0]
+    take_log = pf.field_info[field].take_log
+    if take_log:
+        norm = matplotlib.colors.LogNorm()
+    else:
+        norm = matplotlib.colors.Normalize()
+    ax = pylab.figure().gca()
+    ax.autoscale(False)
+    axi = ax.imshow(na.random.random((npix, npix)),
+                    extent = extent, norm = norm,
+                    origin = 'lower')
+    cb = pylab.colorbar(axi, norm = norm)
+    showme = _MPLFixImage(data_source, axi, field, cb, cls)
+    ax.callbacks.connect("xlim_changed", showme)
+    ax.callbacks.connect("ylim_changed", showme)
+    ax.set_xlim(extent[0], extent[1])
+    ax.set_ylim(extent[2], extent[3])
+    return ax


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -164,8 +164,8 @@
                              plot.data[self.field_y] - self.bv_y,
                              int(nx), int(ny),
                            (x0, x1, y0, y1),).transpose()
-        X = na.mgrid[0:plot.image._A.shape[0]-1:nx*1j]# + 0.5*factor
-        Y = na.mgrid[0:plot.image._A.shape[1]-1:ny*1j]# + 0.5*factor
+        X = na.mgrid[0:plot.image._A.shape[0]-1:ny*1j]# + 0.5*factor
+        Y = na.mgrid[0:plot.image._A.shape[1]-1:nx*1j]# + 0.5*factor
         if self.normalize:
             nn = na.sqrt(pixX**2 + pixY**2)
             pixX /= nn
@@ -723,7 +723,7 @@
                  font_size=8, print_halo_size=False,
                  print_halo_mass=False, width=None):
         """
-        Accepts a :class:`yt.lagos.HopList` *hop_output* and plots up to
+        Accepts a :class:`yt.HopList` *hop_output* and plots up to
         *max_number* (None for unlimited) halos as circles.
         """
         self.hop_output = hop_output


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/plot_types.py
--- a/yt/visualization/plot_types.py
+++ b/yt/visualization/plot_types.py
@@ -35,7 +35,7 @@
     x_dict, \
     y_dict, \
     axis_names
-from .color_maps import raven_colormaps
+from .color_maps import yt_colormaps
 
 class CallbackRegistryHandler(object):
     def __init__(self, plot):
@@ -226,8 +226,8 @@
         Change the colormap of this plot to *cmap*.
         """
         if isinstance(cmap, types.StringTypes):
-            if str(cmap) in raven_colormaps:
-                cmap = raven_colormaps[str(cmap)]
+            if str(cmap) in yt_colormaps:
+                cmap = yt_colormaps[str(cmap)]
             elif hasattr(matplotlib.cm, cmap):
                 cmap = getattr(matplotlib.cm, cmap)
         self.cmap = cmap


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -198,7 +198,7 @@
             the y-axis.  All subsequent fields will be binned and their
             profiles added to the underlying `BinnedProfile2D`.
         cmap : string, optional
-            An acceptable colormap.  See either raven.color_maps or
+            An acceptable colormap.  See either yt.visualization.color_maps or
             http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
         weight : string, default "CellMassMsun"
             The weighting field for an average.  This defaults to mass-weighted


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/streamlines.py
--- a/yt/visualization/streamlines.py
+++ b/yt/visualization/streamlines.py
@@ -38,7 +38,7 @@
 
     Parameters
     ----------
-    pf : `~yt.lagos.StaticOutput`
+    pf : `~yt.data_objects.StaticOutput`
         This is the parameter file to streamline
     pos : array_like
         An array of initial starting positions of the streamlines.


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -193,6 +193,9 @@
         self.steady_north = steady_north
         self.expand_factor = expand_factor
         # This seems to be necessary for now.  Not sure what goes wrong when not true.
+        if na.all(north_vector == normal_vector):
+            mylog.error("North vector and normal vector are the same.  Disregarding north vector.")
+            north_vector = None
         if north_vector is not None: self.steady_north=True
         self.north_vector = north_vector
         self.rotation_vector = north_vector
@@ -414,7 +417,7 @@
             self.zoom(f)
             yield self.snapshot()
 
-    def move_to(self, final, n_steps, final_width=None, exponential=True):
+    def move_to(self, final, n_steps, final_width=None, exponential=False):
         r"""Loop over a look_at
 
         This will yield `n_steps` snapshots until the current view has been
@@ -446,8 +449,12 @@
                 if not iterable(final_width):
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
+                if (self.center == 0.0).all():
+                    self.center += (na.array(final) - self.center) / (10. * n_steps)
                 final_zoom = final_width/na.array(self.width)
                 dW = final_zoom**(1.0/n_steps)
+            else:
+                dW = 1.0
             position_diff = (na.array(final)/self.center)*1.0
             dx = position_diff**(1.0/n_steps)
         else:
@@ -456,6 +463,8 @@
                     width = na.array([final_width, final_width, final_width]) 
                     # front/back, left/right, top/bottom
                 dW = (1.0*final_width-na.array(self.width))/n_steps
+            else:
+                dW = 1.0
             dx = (na.array(final)-self.center)*1.0/n_steps
         for i in xrange(n_steps):
             if exponential:
@@ -533,6 +542,62 @@
 
 data_object_registry["camera"] = Camera
 
+class InteractiveCamera(Camera):
+    def __init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = None, steady_north=False,
+                 volume = None, fields = None,
+                 log_fields = None,
+                 sub_samples = 5, pf = None,
+                 use_kd=True, l_max=None, no_ghost=True,
+                 tree_type='domain',expand_factor=1.0,
+                 le=None, re=None):
+        self.frames = []
+        Camera.__init__(self, center, normal_vector, width,
+                 resolution, transfer_function,
+                 north_vector = north_vector, steady_north=steady_north,
+                 volume = volume, fields = fields,
+                 log_fields = log_fields,
+                 sub_samples = sub_samples, pf = pf,
+                 use_kd=use_kd, l_max=l_max, no_ghost=no_ghost,
+                 tree_type=tree_type,expand_factor=expand_factor,
+                 le=le, re=re)
+
+    def snapshot(self, fn = None, clip_ratio = None):
+        import matplotlib.pylab
+        matplotlib.pylab.figure(2)
+        self.transfer_function.show()
+        matplotlib.pylab.draw()
+        im = Camera.snapshot(self, fn, clip_ratio)
+        matplotlib.pylab.figure(1)
+        matplotlib.pylab.imshow(im/im.max())
+        matplotlib.pylab.draw()
+        self.frames.append(im)
+        
+    def rotation(self, theta, n_steps, rot_vector=None):
+        for frame in Camera.rotation(self, theta, n_steps, rot_vector):
+            if frame is not None:
+                self.frames.append(frame)
+                
+    def zoomin(self, final, n_steps):
+        for frame in Camera.zoomin(self, final, n_steps):
+            if frame is not None:
+                self.frames.append(frame)
+                
+    def clear_frames(self):
+        del self.frames
+        self.frames = []
+        
+    def save_frames(self, basename, clip_ratio=None):
+        for i, frame in enumerate(self.frames):
+            fn = basename + '_%04i.png'%i
+            if clip_ratio is not None:
+                write_bitmap(frame, fn, clip_ratio*frame.std())
+            else:
+                write_bitmap(frame, fn)
+
+data_object_registry["interactive_camera"] = InteractiveCamera
+
 class PerspectiveCamera(Camera):
     def get_vector_plane(self, image):
         # We should move away from pre-generation of vectors like this and into
@@ -582,8 +647,8 @@
                  transfer_function = None, fields = None,
                  sub_samples = 5, log_fields = None, volume = None,
                  pf = None, use_kd=True, no_ghost=False):
-	ParallelAnalysisInterface.__init__(self)
-	if pf is not None: self.pf = pf
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
         self.center = na.array(center, dtype='float64')
         self.radius = radius
         self.nside = nside
@@ -652,8 +717,8 @@
                  sub_samples = 5, log_fields = None, volume = None,
                  pf = None, use_kd=True, no_ghost=False,
                  rays_per_cell = 0.1, max_nside = 8192):
-	ParallelAnalysisInterface.__init__(self)
-	if pf is not None: self.pf = pf
+        ParallelAnalysisInterface.__init__(self)
+        if pf is not None: self.pf = pf
         self.center = na.array(center, dtype='float64')
         self.radius = radius
         self.use_kd = use_kd
@@ -706,8 +771,8 @@
 
 class StereoPairCamera(Camera):
     def __init__(self, original_camera, relative_separation = 0.005):
-	ParallelAnalysisInterface.__init__(self)
-	self.original_camera = original_camera
+        ParallelAnalysisInterface.__init__(self)
+        self.original_camera = original_camera
         self.relative_separation = relative_separation
 
     def split(self):
@@ -729,7 +794,7 @@
         return (left_camera, right_camera)
 
 def off_axis_projection(pf, center, normal_vector, width, resolution,
-                        field, weight = None, volume = None):
+                        field, weight = None, volume = None, no_ghost = True):
     r"""Project through a parameter file, off-axis, and return the image plane.
 
     This function will accept the necessary items to integrate through a volume
@@ -761,6 +826,14 @@
     volume : `yt.extensions.volume_rendering.HomogenizedVolume`, optional
         The volume to ray cast through.  Can be specified for finer-grained
         control, but otherwise will be automatically generated.
+    no_ghost: bool, optional
+        Optimization option.  If True, homogenized bricks will
+        extrapolate out from grid instead of interpolating from
+        ghost zones that have to first be calculated.  This can
+        lead to large speed improvements, but at a loss of
+        accuracy/smoothness in resulting image.  The effects are
+        less notable when the transfer function is smooth and
+        broad. Default: True
 
     Returns
     -------
@@ -788,7 +861,7 @@
     cam = pf.h.camera(center, normal_vector, width, resolution, tf,
                       fields = fields,
                       log_fields = [False] * len(fields),
-                      volume = volume)
+                      volume = volume, no_ghost = no_ghost)
     vals = cam.snapshot()
     image = vals[:,:,0]
     if weight is None:
@@ -796,5 +869,5 @@
         image *= dl
     else:
         image /= vals[:,:,1]
-        pf.field_info._field_list.pop("temp_weightfield")
+        pf.field_info.pop("temp_weightfield")
     return image
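
A minimal sketch of the new keyword in use (parameter values
hypothetical); passing no_ghost=False trades rendering speed for
smoother brick boundaries:

    >>> image = off_axis_projection(pf, [0.5, 0.5, 0.5], [1.0, 0.3, 0.2],
    ...                             0.2, 512, "Density", no_ghost=False)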


diff -r 44fbf08affb7a88018e17120eebf362e0391dae4 -r 995fd6c8ef5bdb97fb4fe60fc575b549237f6f5d yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -199,6 +199,28 @@
         pylab.ylim(0.0, 1.0)
         pylab.savefig(filename)
 
+    def show(self):
+        r"""Display an image of the transfer function
+
+        This function loads up matplotlib and displays the current transfer function.
+
+        Parameters
+        ----------
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        >>> tf.show()
+        """
+        import matplotlib;import pylab
+        pylab.clf()
+        pylab.plot(self.x, self.y, 'xk-')
+        pylab.xlim(*self.x_bounds)
+        pylab.ylim(0.0, 1.0)
+        pylab.draw()
+
 class MultiVariateTransferFunction(object):
     def __init__(self):
         r"""This object constructs a set of field tables that allow for
@@ -447,6 +469,46 @@
         ax.set_xlabel("Value")
         pyplot.savefig(filename)
 
+    def show(self):
+        r"""Display an image of the transfer function
+
+        This function loads up matplotlib and displays the current transfer function.
+
+        Parameters
+        ----------
+
+        Examples
+        --------
+
+        >>> tf = TransferFunction( (-10.0, -5.0) )
+        >>> tf.add_gaussian(-9.0, 0.01, 1.0)
+        >>> tf.show()
+        """
+        from matplotlib import pyplot
+        from matplotlib.ticker import FuncFormatter
+        pyplot.clf()
+        ax = pyplot.axes()
+        i_data = na.zeros((self.alpha.x.size, self.funcs[0].y.size, 3))
+        i_data[:,:,0] = na.outer(na.ones(self.alpha.x.size), self.funcs[0].y)
+        i_data[:,:,1] = na.outer(na.ones(self.alpha.x.size), self.funcs[1].y)
+        i_data[:,:,2] = na.outer(na.ones(self.alpha.x.size), self.funcs[2].y)
+        ax.imshow(i_data, origin='lower')
+        ax.fill_between(na.arange(self.alpha.y.size), self.alpha.x.size * self.alpha.y, y2=self.alpha.x.size, color='white')
+        ax.set_xlim(0, self.alpha.x.size)
+        xticks = na.arange(na.ceil(self.alpha.x[0]), na.floor(self.alpha.x[-1]) + 1, 1) - self.alpha.x[0]
+        xticks *= self.alpha.x.size / (self.alpha.x[-1] - self.alpha.x[0])
+        ax.xaxis.set_ticks(xticks)
+        def x_format(x, pos):
+            return "%.1f" % (x * (self.alpha.x[-1] - self.alpha.x[0]) / (self.alpha.x.size) + self.alpha.x[0])
+        ax.xaxis.set_major_formatter(FuncFormatter(x_format))
+        yticks = na.linspace(0,1,5) * self.alpha.y.size
+        ax.yaxis.set_ticks(yticks)
+        def y_format(y, pos):
+            return "%0.2f" % (y / self.alpha.y.size)
+        ax.yaxis.set_major_formatter(FuncFormatter(y_format))
+        ax.set_ylabel("Transmission")
+        ax.set_xlabel("Value")
+        
     def sample_colormap(self, v, w, alpha=None, colormap="gist_stern", col_bounds=None):
         r"""Add a Gaussian based on an existing colormap.
 
@@ -491,8 +553,8 @@
         r,g,b,a = cmap(rel)
         if alpha is None: alpha = a
         self.add_gaussian(v, w, [r,g,b,alpha])
-        print "Adding gaussian at %s with width %s and colors %s" % (
-                v, w, (r,g,b,alpha))
+        mylog.debug("Adding gaussian at %s with width %s and colors %s" % (
+                v, w, (r,g,b,alpha)))
 
     def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
                    colormap="gist_stern", col_bounds = None):
@@ -628,7 +690,7 @@
             # Now we set up the scattering
             scat = (johnson_filters[f]["Lchar"]**-4 / mscat)*anorm
             tf = TransferFunction(rho_bounds)
-            print "Adding: %s with relative scattering %s" % (f, scat)
+            mylog.debug("Adding: %s with relative scattering %s" % (f, scat))
             tf.y *= 0.0; tf.y += scat
             self.add_field_table(tf, 1, weight_field_id = 1)
             self.link_channels(i+3, i+3)

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


