[Yt-svn] commit/yt: 19 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Sep 2 07:21:04 PDT 2011


19 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/9d936221b3bc/
changeset:   9d936221b3bc
branch:      yt
user:        MatthewTurk
date:        2011-08-25 21:01:26
summary:     Major changes to the covering grid system.  Removed the old float
covering grids, deprecating them in favor of the now-standard int covering
grids.  Also reworked much of the geometry and eliminated a few major hotspots
in the smoothed covering grids.  On the FLASH dataset example, this brought the
runtime for calculating a globally averaged density from 550 seconds down to
120 seconds.  The remaining hotspot is the function _get_data_from_grid in the
smoothed covering grid, which probably spends most of its time zipping fields
together.
affected #:  4 files (13.1 KB)
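
For context, a minimal sketch of the int covering grid interface this
changeset standardizes on -- assuming a parameter file has already been
loaded as `pf`; the call pattern follows the constructors and the
hierarchy call shown in the diffs below, but is illustrative rather than
authoritative:

    from yt.mods import load

    pf = load("my_dataset")  # hypothetical dataset path
    # Fixed-resolution cube at level 2 anchored at the domain's left edge;
    # the smoothed variant interpolates coarse data upward instead of
    # leaving -999 sentinels where finer grids are absent.
    cube = pf.h.smoothed_covering_grid(2, pf.domain_left_edge,
                                       dims=[128, 128, 128])
    print cube["Density"].mean()  # e.g. a globally averaged density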

--- a/yt/data_objects/data_containers.py	Thu Aug 25 08:06:53 2011 -0400
+++ b/yt/data_objects/data_containers.py	Thu Aug 25 15:01:26 2011 -0400
@@ -40,7 +40,7 @@
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
-    QuadTree
+    QuadTree, get_box_grids_below_level
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -2966,251 +2966,6 @@
         """
         return 4./3. * math.pi * (self.radius * self.pf[unit])**3.0
 
-class AMRFloatCoveringGridBase(AMR3DData):
-    """
-    Covering grids represent fixed-resolution data over a given region.
-    In order to achieve this goal -- for instance in order to obtain ghost
-    zones -- grids up to and including the indicated level are included.
-    No interpolation is done (as that would affect the 'power' on small
-    scales) on the input data.
-    """
-    _spatial = True
-    _type_name = "float_covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
-    def __init__(self, level, left_edge, right_edge, dims, fields = None,
-                 pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
-        """
-        The data object returned will consider grids up to *level* in
-        generating fixed resolution data between *left_edge* and *right_edge*
-        that is *dims* (3-values) on a side.
-        """
-        AMR3DData.__init__(self, center=None, fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
-        self.level = level
-        self.ActiveDimensions = na.array(dims)
-        dds = (self.right_edge-self.left_edge) \
-              / self.ActiveDimensions
-        self.dds = dds
-        self.data["dx"] = dds[0]
-        self.data["dy"] = dds[1]
-        self.data["dz"] = dds[2]
-        self._num_ghost_zones = num_ghost_zones
-        self._use_pbar = use_pbar
-        self._refresh_data()
-
-    def _get_list_of_grids(self):
-        if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
-                            self.left_edge, self.right_edge)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge, self.right_edge)
-        level_ind = na.where(self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
-
-    def extract_region(self, indices):
-        mylog.error("Sorry, dude, do it yourself, it's already in 3-D.")
-
-    def _refresh_data(self):
-        AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
-
-    def get_data(self, fields=None):
-        if self._grids is None:
-            self._get_list_of_grids()
-        if fields is None:
-            fields = self.fields[:]
-        else:
-            fields = ensure_list(fields)
-        obtain_fields = []
-        for field in fields:
-            if self.data.has_key(field): continue
-            if field not in self.hierarchy.field_list:
-                try:
-                    #print "Generating", field
-                    self._generate_field(field)
-                    continue
-                except NeedsOriginalGrid, ngt_exception:
-                    pass
-            obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
-        if len(obtain_fields) == 0: return
-        mylog.debug("Getting fields %s from %s possible grids",
-                   obtain_fields, len(self._grids))
-        if self._use_pbar: pbar = \
-                get_pbar('Searching grids for values ', len(self._grids))
-        for i, grid in enumerate(self._grids):
-            if self._use_pbar: pbar.update(i)
-            self._get_data_from_grid(grid, obtain_fields)
-            if not na.any(self[obtain_fields[0]] == -999): break
-        if self._use_pbar: pbar.finish()
-        if na.any(self[obtain_fields[0]] == -999):
-            # and self.dx < self.hierarchy.grids[0].dx:
-            print "COVERING PROBLEM", na.where(self[obtain_fields[0]]==-999)[0].size
-            print na.where(self[obtain_fields[0]]==-999)
-            raise KeyError
-            
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator; this might even raise!
-            self.pf.field_info[field].check_available(self)
-            self[field] = self.pf.field_info[field](self)
-        else: # Can't find the field, try as it might
-            raise KeyError(field)
-
-    def flush_data(self, field=None):
-        """
-        Any modifications made to the data in this object are pushed back
-        to the originating grids, except the cells where those grids are both
-        below the current level `and` have child cells.
-        """
-        self._get_list_of_grids()
-        # We don't generate coordinates here.
-        if field == None:
-            fields_to_get = self.fields[:]
-        else:
-            fields_to_get = ensure_list(field)
-        for grid in self._grids:
-            self._flush_data_to_grid(grid, fields_to_get)
-
-    @restore_grid_state
-    def _get_data_from_grid(self, grid, fields):
-        ll = int(grid.Level == self.level)
-        g_dx = grid.dds.ravel()
-        c_dx = self.dds.ravel()
-        g_fields = [grid[field] for field in ensure_list(fields)]
-        c_fields = [self[field] for field in ensure_list(fields)]
-        DataCubeRefine(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge, self.right_edge, c_dx, c_fields,
-            ll, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    def _flush_data_to_grid(self, grid, fields):
-        ll = int(grid.Level == self.level)
-        g_dx = grid.dds.ravel()
-        c_dx = self.dds.ravel()
-        g_fields = []
-        for field in ensure_list(fields):
-            if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
-            g_fields.append(grid[field])
-        c_fields = [self[field] for field in ensure_list(fields)]
-        DataCubeReplace(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge, self.right_edge, c_dx, c_fields,
-            ll, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    @property
-    def LeftEdge(self):
-        return self.left_edge
-
-    @property
-    def RightEdge(self):
-        return self.right_edge
-
-class AMRSmoothedCoveringGridBase(AMRFloatCoveringGridBase):
-    _type_name = "smoothed_covering_grid"
-    def __init__(self, *args, **kwargs):
-        dlog2 = na.log10(kwargs['dims'])/na.log10(2)
-        if not na.all(na.floor(dlog2) == na.ceil(dlog2)):
-            pass # used to warn but I think it is not accurate anymore
-            #mylog.warning("Must be power of two dimensions")
-            #raise ValueError
-        #kwargs['num_ghost_zones'] = 0
-        AMRFloatCoveringGridBase.__init__(self, *args, **kwargs)
-
-    def _get_list_of_grids(self):
-        if na.any(self.left_edge - self.dds < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + self.dds > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
-                            self.left_edge - self.dds,
-                            self.right_edge + self.dds)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge - self.dds,
-                            self.right_edge + self.dds)
-        level_ind = na.where(self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)]
-
-    def _get_level_array(self, level, fields):
-        fields = ensure_list(fields)
-        # We assume refinement by a factor of two
-        rf = self.pf.refine_by**(self.level - level)
-        dims = na.maximum(1,self.ActiveDimensions/rf) + 2
-        dx = (self.right_edge-self.left_edge)/(dims-2)
-        x,y,z = (na.mgrid[0:dims[0],0:dims[1],0:dims[2]].astype('float64')-0.5)\
-              * dx[0]
-        x += self.left_edge[0] - dx[0]
-        y += self.left_edge[1] - dx[1]
-        z += self.left_edge[2] - dx[2]
-        offsets = [self['cd%s' % ax]*0.5 for ax in 'xyz']
-        bounds = [self.left_edge[0]-offsets[0], self.right_edge[0]+offsets[0],
-                  self.left_edge[1]-offsets[1], self.right_edge[1]+offsets[1],
-                  self.left_edge[2]-offsets[2], self.right_edge[2]+offsets[2]]
-        fake_grid = {'x':x,'y':y,'z':z,'dx':dx[0],'dy':dx[1],'dz':dx[2]}
-        for ax in 'xyz': self['cd%s'%ax] = fake_grid['d%s'%ax]
-        for field in fields:
-            # Generate the new grid field
-            interpolator = TrilinearFieldInterpolator(
-                            self[field], bounds, ['x','y','z'],
-                            truncate = True)
-            self[field] = interpolator(fake_grid)
-        return fake_grid
-
-    def get_data(self, field=None):
-        self._get_list_of_grids()
-        # We don't generate coordinates here.
-        if field == None:
-            fields_to_get = self.fields[:]
-        else:
-            fields_to_get = ensure_list(field)
-        for field in fields_to_get:
-            grid_count = 0
-            if self.data.has_key(field):
-                continue
-            mylog.debug("Getting field %s from %s possible grids",
-                       field, len(self._grids))
-            if self._use_pbar: pbar = \
-                    get_pbar('Searching grids for values ', len(self._grids))
-            # How do we find out the root grid base dx?
-            idims = na.array([3,3,3])
-            dx = na.minimum((self.right_edge-self.left_edge)/(idims-2),
-                            self.pf.h.grids[0].dds[0])
-            idims = na.floor((self.right_edge-self.left_edge)/dx) + 2
-            for ax in 'xyz': self['cd%s'%ax] = dx[0]
-            self[field] = na.zeros(idims,dtype='float64')-999
-            for level in range(self.level+1):
-                for grid in self.select_grids(level):
-                    if self._use_pbar: pbar.update(grid_count)
-                    self._get_data_from_grid(grid, field)
-                    grid_count += 1
-                if level < self.level: self._get_level_array(level+1, field)
-            self[field] = self[field][1:-1,1:-1,1:-1]
-            if self._use_pbar: pbar.finish()
-        
-    @restore_grid_state
-    def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        g_dx = grid.dds
-        c_dx = na.array([self['cdx'],self['cdy'],self['cdz']])
-        g_fields = [grid[field] for field in fields]
-        c_fields = [self[field] for field in fields]
-        total = DataCubeRefine(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge-c_dx, self.right_edge+c_dx,
-            c_dx, c_fields,
-            1, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    def flush_data(self, *args, **kwargs):
-        raise KeyError("Can't do this")
-
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
@@ -3234,16 +2989,15 @@
         if self._grids is not None: return
         if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
            na.any(self.right_edge + buffer > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
-                            self.right_edge + buffer)
+                            self.right_edge + buffer, self.level)
         else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge - buffer,
-                            self.right_edge + buffer)
-        level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
+            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
+                self.left_edge - buffer,
+                self.right_edge + buffer, self.level)
+        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
@@ -3346,20 +3100,22 @@
     def RightEdge(self):
         return self.right_edge
 
-class AMRIntSmoothedCoveringGridBase(AMRCoveringGridBase):
-    _type_name = "si_covering_grid"
+class AMRSmoothedCoveringGridBase(AMRCoveringGridBase):
+    _type_name = "smoothed_covering_grid"
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
-        buffer = self.pf.h.select_grids(0)[0].dds
+        if self._grids is not None: return
+        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
+                 / self.pf.domain_dimensions).max()
         AMRCoveringGridBase._get_list_of_grids(self, buffer)
+        # We reverse the order to ensure that coarse grids are first
         self._grids = self._grids[::-1]
 
     def get_data(self, field=None):
-        dx = [self.pf.h.select_grids(l)[0].dds for l in range(self.level+1)]
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
@@ -3379,14 +3135,16 @@
             # L/R edges.
             # We jump-start our task here
             self._update_level_state(0, field)
-            for level in range(self.level+1):
-                for grid in self.select_grids(level):
-                    if self._use_pbar: pbar.update(grid_count)
-                    self._get_data_from_grid(grid, field, level)
-                    grid_count += 1
-                if level < self.level:
-                    self._update_level_state(level + 1)
+            
+            # The grids are assumed to be pre-sorted
+            last_level = 0
+            for gi, grid in enumerate(self._grids):
+                if self._use_pbar: pbar.update(gi)
+                if grid.Level > last_level and grid.Level <= self.level:
+                    self._update_level_state(last_level + 1)
                     self._refine(1, field)
+                    last_level = grid.Level
+                self._get_data_from_grid(grid, field, grid.Level)
             if self.level > 0:
                 self[field] = self[field][1:-1,1:-1,1:-1]
             if na.any(self[field] == -999):
@@ -3408,11 +3166,13 @@
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
             self[field] = na.zeros(idims,dtype='float64')-999
+            self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
             self[field] = na.zeros(idims,dtype='float64')-999
+            self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, field):
         rf = float(self.pf.refine_by**dlevel)
@@ -3438,17 +3198,17 @@
         interpolator = TrilinearFieldInterpolator(
                         self[field], old_bounds, ['x','y','z'],
                         truncate = True)
+        self._cur_dims = new_dims.astype("int32")
         self[field] = interpolator(fake_grid)
 
     def _get_data_from_grid(self, grid, fields, level):
         fields = ensure_list(fields)
         g_fields = [grid[field] for field in fields]
         c_fields = [self[field] for field in fields]
-        dims = na.array(self[field].shape, dtype='int32')
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
-            dims, grid.ActiveDimensions,
+            self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
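
The reworked get_data above replaces the per-level select_grids loop with
a single pass over the pre-sorted grid list, refining the accumulated cube
whenever the walk crosses into a finer level.  In outline -- a paraphrase
with hypothetical helper names, not the actual method:

    # Paraphrase of the new single-pass level walk (illustrative only):
    last_level = 0
    for grid in grids:                       # pre-sorted, coarsest first
        if last_level < grid.Level <= max_level:
            update_level_state(last_level + 1)  # hypothetical: bump dx, indices
            refine(1)                           # interpolate cube to the new dx
            last_level = grid.Level
        fill_from_grid(grid)                 # hypothetical: FillRegion into cube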
 


--- a/yt/data_objects/grid_patch.py	Thu Aug 25 08:06:53 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Thu Aug 25 15:01:26 2011 -0400
@@ -441,7 +441,7 @@
         if smoothed:
             #cube = self.hierarchy.smoothed_covering_grid(
             #    level, new_left_edge, new_right_edge, **kwargs)
-            cube = self.hierarchy.si_covering_grid(
+            cube = self.hierarchy.smoothed_covering_grid(
                 level, new_left_edge, **kwargs)
         else:
             cube = self.hierarchy.covering_grid(


--- a/yt/data_objects/object_finding_mixin.py	Thu Aug 25 08:06:53 2011 -0400
+++ b/yt/data_objects/object_finding_mixin.py	Thu Aug 25 15:01:26 2011 -0400
@@ -26,6 +26,9 @@
 import numpy as na
 
 from yt.funcs import *
+from yt.utilities.amr_utils import \
+    get_box_grids_level, \
+    get_box_grids_below_level
 
 class ObjectFindingMixin(object):
 
@@ -201,3 +204,33 @@
                     mask[gi] = True
         return self.grids[mask], na.where(mask)
 
+    def get_box_grids_below_level(self, left_edge, right_edge, level):
+        # We discard grids if they are ABOVE the level
+        mask = na.empty(self.grids.size, dtype='int32')
+        get_box_grids_below_level(left_edge, right_edge,
+                            level,
+                            self.grid_left_edge, self.grid_right_edge,
+                            self.grid_levels, mask)
+        mask = mask.astype("bool")
+        return self.grids[mask], na.where(mask)
+
+    def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        mask = na.zeros(self.grids.shape, dtype='bool')
+        dl = self.parameter_file.domain_left_edge
+        dr = self.parameter_file.domain_right_edge
+        db = right_edge - left_edge
+        for off_x in [-1, 0, 1]:
+            nle = left_edge.copy()
+            nre = left_edge.copy()
+            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            for off_y in [-1, 0, 1]:
+                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                for off_z in [-1, 0, 1]:
+                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nre = nle + db
+                    g, gi = self.get_box_grids_below_level(nle, nre, level)
+                    mask[gi] = True
+        return self.grids[mask], na.where(mask)
+
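
The selection logic of the new routines, restated as a pure-NumPy sketch
(equivalent in intent to the Cython get_box_grids_below_level shown
further down; the function name here is illustrative):

    import numpy as na

    def grids_below_level_mask(left_edge, right_edge, level,
                               left_edges, right_edges, levels):
        # A grid survives if its level does not exceed *level* and its
        # bounding box overlaps the query box on all three axes.
        overlap = na.all((left_edge < right_edges) &
                         (right_edge > left_edges), axis=1)
        return overlap & (levels.ravel() <= level)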


--- a/yt/utilities/_amr_utils/misc_utilities.pyx	Thu Aug 25 08:06:53 2011 -0400
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx	Thu Aug 25 15:01:26 2011 -0400
@@ -27,6 +27,9 @@
 cimport numpy as np
 cimport cython
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_color_bounds(np.ndarray[np.float64_t, ndim=1] px,
                      np.ndarray[np.float64_t, ndim=1] py,
                      np.ndarray[np.float64_t, ndim=1] pdx,
@@ -51,6 +54,9 @@
             if v > ma: ma = v
     return (mi, ma)
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
                         np.ndarray[np.float64_t, ndim=1] right_edge,
                         int level,
@@ -77,6 +83,31 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def get_box_grids_below_level(
+                        np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        mask[i] = 0
+        if levels[i,0] <= level:
+            inside = 1
+            for n in range(3):
+                if left_edge[n] >= right_edges[i,n] or \
+                   right_edge[n] <= left_edges[i,n]:
+                    inside = 0
+                    break
+            if inside == 1: mask[i] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def find_values_at_point(np.ndarray[np.float64_t, ndim=1] point,
                          np.ndarray[np.float64_t, ndim=2] left_edges,
                          np.ndarray[np.float64_t, ndim=2] right_edges,


http://bitbucket.org/yt_analysis/yt/changeset/a8e73b0169fc/
changeset:   a8e73b0169fc
branch:      yt
user:        MatthewTurk
date:        2011-08-26 02:07:48
summary:     Backporting a fix from stable for RAMSES cosmology.
affected #:  1 file (440 bytes)

--- a/yt/frontends/ramses/data_structures.py	Thu Aug 25 17:58:53 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Thu Aug 25 20:07:48 2011 -0400
@@ -330,6 +330,14 @@
                                            * rheader['boxlen']
         self.domain_left_edge = na.zeros(3, dtype='float64')
         self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        # This is likely not true, but I am not sure how to otherwise
+        # distinguish them.
+        mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")
+        self.cosmological_simulation = 1
+        self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+        self.omega_lambda = rheader["omega_l"]
+        self.omega_matter = rheader["omega_m"]
+        self.hubble_constant = rheader["H0"]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
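
The redshift assignment above is the standard relation between expansion
factor and redshift, z = 1/a - 1, with "aexp" the expansion factor read
from the RAMSES header.  A worked example with an illustrative value:

    aexp = 0.25                        # hypothetical header value
    current_redshift = (1.0 / aexp) - 1.0
    print current_redshift             # 3.0, i.e. a = 0.25 <-> z = 3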


http://bitbucket.org/yt_analysis/yt/changeset/6a8f74671b46/
changeset:   6a8f74671b46
branch:      yt
user:        MatthewTurk
date:        2011-08-26 04:03:22
summary:     Removing expired item from import
affected #:  1 file (30 bytes)

--- a/yt/analysis_modules/api.py	Thu Aug 25 20:07:48 2011 -0400
+++ b/yt/analysis_modules/api.py	Thu Aug 25 22:03:22 2011 -0400
@@ -79,7 +79,6 @@
     ExtractedParameterFile
 
 from .level_sets.api import \
-    GridConsiderationQueue, \
     coalesce_join_tree, \
     identify_contours, \
     Clump, \


http://bitbucket.org/yt_analysis/yt/changeset/bafb282e7deb/
changeset:   bafb282e7deb
branch:      yt
user:        MatthewTurk
date:        2011-08-26 04:03:55
summary:     Fixing title underline
affected #:  1 file (1 byte)

--- a/yt/data_objects/object_finding_mixin.py	Thu Aug 25 22:03:22 2011 -0400
+++ b/yt/data_objects/object_finding_mixin.py	Thu Aug 25 22:03:55 2011 -0400
@@ -115,7 +115,7 @@
         as the input *fields*.
         
         Parameters
-        ---------
+        ----------
         fields : string or list of strings
             The field(s) that will be returned.
         


http://bitbucket.org/yt_analysis/yt/changeset/422d8f6b0364/
changeset:   422d8f6b0364
branch:      yt
user:        MatthewTurk
date:        2011-08-26 04:36:52
summary:     Several fixes to docstrings to get the docs to build correctly.
affected #:  4 files (521 bytes)
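
For reference, the numpydoc layout these docstrings are being moved to --
a generic sketch, not one of the functions changed below:

    def example(seed=None):
        r"""One-line summary ending in a period.

        Parameters
        ----------
        seed : int, optional
            Description of the parameter.  Default: None.
        """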

--- a/yt/analysis_modules/light_cone/light_cone.py	Thu Aug 25 22:03:55 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Thu Aug 25 22:36:52 2011 -0400
@@ -122,12 +122,20 @@
                                                                 deltaz_min=self.deltaz_min)
 
     def calculate_light_cone_solution(self, seed=None, filename=None):
-        """
-        Create list of projections to be added together to make the light cone.
-        :param seed (int): the seed for the random number generator.  Any light cone solution 
-               can be reproduced by giving the same random seed.  Default: None (each solution 
-               will be distinct).
-        :param filename (str): if given, a text file detailing the solution will be written out.  Default: None.
+        r"""Create list of projections to be added together to make the light cone.
+
+        Several sentences providing an extended description. Refer to
+        variables using back-ticks, e.g. `var`.
+
+        Parameters
+        ----------
+        seed : int
+            The seed for the random number generator.  Any light cone solution
+            can be reproduced by giving the same random seed.  Default: None
+            (each solution will be distinct).
+        filename : string
+            If given, a text file detailing the solution will be written out.
+            Default: None.
         """
 
         # Don't use box coherence with maximum projection depths.
@@ -209,13 +217,17 @@
             self._save_light_cone_solution(filename=filename)
 
     def get_halo_mask(self, mask_file=None, map_file=None, **kwargs):
+        r"""Gets a halo mask from a file or makes a new one.
+
+        Parameters
+        ----------
+        mask_file : string, optional
+            An HDF5 file to which to output the halo mask
+        map_file : string, optional
+            A text file to which to output the halo map (locations in the
+            images of the halos)
+
         """
-        Gets a halo mask from a file or makes a new one.
-        :param mask_file (str): specify an hdf5 file to output the halo mask.
-        :param map_file (str): specify a text file to output the halo map 
-               (locations in image of halos).
-        """
-
         # Get halo map if map_file given.
         if map_file is not None and not os.path.exists(map_file):
             light_cone_halo_map(self, map_file=map_file, **kwargs)
@@ -240,22 +252,34 @@
     def project_light_cone(self, field, weight_field=None, apply_halo_mask=False, node=None,
                            save_stack=True, save_slice_images=False, cmap_name='algae', 
                            flatten_stack=False, photon_field=False):
-        """
-        Create projections for light cone, then add them together.
-        :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
-               projections.  Default: None.
-        :param apply_halo_mask (bool): if True, a boolean mask is apply to the light cone projection.  See below for a 
-               description of halo masks.  Default: False.
-        :param node (str): a prefix to be prepended to the node name under which the projection data is serialized.  
-               Default: None.
-        :param save_stack (bool): if True, the unflatted light cone data including each individual slice is written to 
-               an hdf5 file.  Default: True.
-        :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
-        :param cmap_name (str): color map for images.  Default: 'algae'.
-        :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
-               in order to save memory.  This is generally not necessary.  Default: False.
-        :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2`, where R 
-               is the luminosity distance between the observer and the slice redshift.  Default: False.
+        r"""Create projections for light cone, then add them together.
+
+        Parameters
+        ----------
+        weight_field : str
+            the weight field of the projection.  This has the same meaning as
+            in standard projections.  Default: None.
+        apply_halo_mask : bool
+            if True, a boolean mask is applied to the light cone projection.  See
+            below for a description of halo masks.  Default: False.
+        node : string
+            a prefix to be prepended to the node name under which the
+            projection data is serialized.  Default: None.
+        save_stack : bool
+            if True, the unflattened light cone data including each individual
+            slice is written to an hdf5 file.  Default: True.
+        save_slice_images : bool
+            save images for each individual projection slice.  Default: False.
+        cmap_name : string
+            color map for images.  Default: 'algae'.
+        flatten_stack : bool
+            if True, the light cone stack is continually flattened each time a
+            slice is added in order to save memory.  This is generally not
+            necessary.  Default: False.
+        photon_field : bool
+            if True, the projection data for each slice is decremented by 4 Pi
+            R^2, where R is the luminosity distance between the observer and
+            the slice redshift.  Default: False.
         """
 
         # Clear projection stack.


--- a/yt/data_objects/derived_quantities.py	Thu Aug 25 22:03:55 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Thu Aug 25 22:36:52 2011 -0400
@@ -173,9 +173,13 @@
     This function returns the location of the center
     of mass. By default, it computes that of the *non-particle* data in the object.
 
-    :param use_cells: if True, will include the cell mass (default: True)
-    :param use_particles: if True, will include the particles in the 
-    object (default: False)
+    Parameters
+    ----------
+
+    use_cells : bool
+        If True, will include the cell mass (default: True)
+    use_particles : bool
+        if True, will include the particles in the object (default: False)
     """
     x = y = z = den = 0
     if use_cells: 


--- a/yt/frontends/orion/data_structures.py	Thu Aug 25 22:03:55 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Thu Aug 25 22:36:52 2011 -0400
@@ -444,16 +444,12 @@
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='orion_native', paranoia=False,
                  storage_filename = None):
-        """need to override for Orion file structure.
-
-        the paramfile is usually called "inputs"
+        """
+        The paramfile is usually called "inputs",
         and there may be a fortran inputs file usually called "probin";
         plotname here will be a directory name.
-        as per BoxLib, data_style will be one of
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
+        As per BoxLib, data_style will be Native (implemented here), IEEE (not
+        yet implemented), or ASCII (not yet implemented).
         """
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia


--- a/yt/funcs.py	Thu Aug 25 22:03:55 2011 -0400
+++ b/yt/funcs.py	Thu Aug 25 22:36:52 2011 -0400
@@ -137,17 +137,9 @@
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
-    """
+    r"""
     Decorator for seeing how long a given function takes, depending on whether
     or not the global 'yt.timefunctions' config parameter is set.
-
-    This can be used like so:
-
-    .. code-block:: python
-
-       @time_execution
-    def some_longrunning_function(...):
-
     """
     @wraps(func)
     def wrapper(*arg, **kw):


http://bitbucket.org/yt_analysis/yt/changeset/ded11aca556e/
changeset:   ded11aca556e
branch:      yt
user:        MatthewTurk
date:        2011-08-26 16:28:08
summary:     Updating Cython to 0.15
affected #:  1 file (0 bytes)

--- a/doc/install_script.sh	Thu Aug 25 22:36:52 2011 -0400
+++ b/doc/install_script.sh	Fri Aug 26 10:28:08 2011 -0400
@@ -306,7 +306,7 @@
 get_enzotools mercurial-1.8.1.tar.gz
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-1.3.1.tar.gz
-get_enzotools Cython-0.14.tar.gz
+get_enzotools Cython-0.15.tar.gz
 get_enzotools Forthon-0.8.4.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
@@ -522,7 +522,7 @@
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
 do_setup_py ipython-0.10
 do_setup_py h5py-1.3.1
-do_setup_py Cython-0.14
+do_setup_py Cython-0.15
 [ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.4
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


http://bitbucket.org/yt_analysis/yt/changeset/f489bd3b59c0/
changeset:   f489bd3b59c0
branch:      yt
user:        MatthewTurk
date:        2011-08-26 16:29:03
summary:     Bringing in Casey's readability changes.
affected #:  1 file (78 bytes)
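
The "We are all Borg" comment in this file refers to the Borg pattern:
every instance shares one state dictionary rather than enforcing a single
instance.  A generic sketch of the pattern, not yt code:

    class Borg(object):
        _shared_state = {}
        def __init__(self):
            # All instances read and write the same attribute storage.
            self.__dict__ = self._shared_state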

--- a/yt/data_objects/field_info_container.py	Fri Aug 26 10:28:08 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Fri Aug 26 10:29:03 2011 -0400
@@ -26,19 +26,20 @@
 """
 
 import types
-import numpy as na
 import inspect
 import copy
 import itertools
 
+import numpy as na
+
 from yt.funcs import *
 
 class FieldInfoContainer(object): # We are all Borg.
     """
     This is a generic field container.  It contains a list of potential derived
-    fields, all of which know how to act on a data object and return a value.  This
-    object handles converting units as well as validating the availability of a
-    given field.
+    fields, all of which know how to act on a data object and return a value.
+    This object handles converting units as well as validating the availability
+    of a given field.
     """
     _shared_state = {}
     _universal_field_list = {}
@@ -51,22 +52,25 @@
             return self._universal_field_list[key]
         raise KeyError
     def keys(self):
-        """
-        Return all the field names this object knows about.
-        """
+        """ Return all the field names this object knows about. """
         return self._universal_field_list.keys()
+
     def __iter__(self):
         return self._universal_field_list.iterkeys()
+
     def __setitem__(self, key, val):
         self._universal_field_list[key] = val
+
     def has_key(self, key):
         return key in self._universal_field_list
+
     def add_field(self, name, function = None, **kwargs):
         """
         Add a new field, along with supplemental metadata, to the list of
         available fields.  This respects a number of arguments, all of which
         are passed on to the constructor for
         :class:`~yt.data_objects.api.DerivedField`.
+
         """
         if function == None:
             def create_function(function):
@@ -74,6 +78,7 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
 
@@ -89,14 +94,18 @@
 class CodeFieldInfoContainer(FieldInfoContainer):
     def __setitem__(self, key, val):
         self._field_list[key] = val
+
     def __iter__(self):
         return itertools.chain(self._field_list.iterkeys(),
-                        self._universal_field_list.iterkeys())
+                               self._universal_field_list.iterkeys())
+
     def keys(self):
         return set(self._field_list.keys() + self._universal_field_list.keys())
+
     def has_key(self, key):
         return key in self._universal_field_list \
             or key in self._field_list
+
     def __getitem__(self, key):
         if key in self._field_list:
             return self._field_list[key]
@@ -111,6 +120,7 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __str__(self):
         return "(%s, %s)" % (self.ghost_zones, self.fields)
 
@@ -121,18 +131,21 @@
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
+
     def __str__(self):
         return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
+
     def __str__(self):
         return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
+
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
@@ -141,18 +154,19 @@
     NumberOfParticles = 1
     _read_exception = None
     _id_offset = 0
+
     def __init__(self, nd = 16, pf = None, flat = False):
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
-        self.ActiveDimensions = [nd,nd,nd]
-        self.LeftEdge = [0.0,0.0,0.0]
-        self.RightEdge = [1.0,1.0,1.0]
+        self.ActiveDimensions = [nd, nd, nd]
+        self.LeftEdge = [0.0, 0.0, 0.0]
+        self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = na.ones(3, "float64")
         self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
-        if pf is None:
+        if pf is None:  # setup defaults
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
@@ -168,17 +182,18 @@
             io = fake_io()
             def get_smallest_dx(self):
                 return 1.0
+
         self.hierarchy = fake_hierarchy()
         self.requested = []
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd,nd,nd), dtype='float64')
-                + 1e-4*na.random.random((nd,nd,nd)))
+                lambda: na.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*na.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd*nd*nd), dtype='float64')
-                + 1e-4*na.random.random((nd*nd*nd)))
+                lambda: na.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*na.random.random((nd * nd * nd)))
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(item) and \
@@ -190,15 +205,20 @@
                 nfd = FieldDetector(self.nd+ngz*2)
                 nfd._num_ghost_zones = ngz
                 vv = FI[item](nfd)
-                if ngz > 0: vv = vv[ngz:-ngz,ngz:-ngz,ngz:-ngz]
+                if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
+
                 for i in nfd.requested:
                     if i not in self.requested: self.requested.append(i)
+
                 for i in nfd.requested_parameters:
-                    if i not in self.requested_parameters: self.requested_parameters.append(i)
+                    if i not in self.requested_parameters:
+                        self.requested_parameters.append(i)
+
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
                 return self[item]
+
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
@@ -249,23 +269,29 @@
         :param display_name: a name used in the plots
         :param projection_conversion: which unit should we multiply by in a
                                       projection?
+
         """
         self.name = name
         self._function = function
+
         if validators:
             self.validators = ensure_list(validators)
         else:
             self.validators = []
+
         self.take_log = take_log
         self._units = units
         self._projected_units = projected_units
+
         if not convert_function:
             convert_function = lambda a: 1.0
         self._convert_function = convert_function
         self._particle_convert_function = particle_convert_function
+
         self.particle_type = particle_type
         self.vector_field = vector_field
         self.projection_conversion = projection_conversion
+
         self.display_field = display_field
         self.display_name = display_name
         self.not_in_all = not_in_all
@@ -274,6 +300,7 @@
         """
         This raises an exception of the appropriate type if the set of
         validation mechanisms are not met, and otherwise returns True.
+
         """
         for validator in self.validators:
             validator(data)
@@ -283,6 +310,7 @@
     def get_dependencies(self, *args, **kwargs):
         """
         This returns a list of names of fields that this field depends on.
+
         """
         e = FieldDetector(*args, **kwargs)
         if self._function.func_name == '<lambda>':
@@ -292,47 +320,50 @@
         return e
 
     def get_units(self):
-        """
-        Return a string describing the units.
-        """
+        """ Return a string describing the units.  """
         return self._units
 
     def get_projected_units(self):
         """
         Return a string describing the units if the field has been projected.
+
         """
         return self._projected_units
 
     def __call__(self, data):
-        """
-        Return the value of the field in a given *data* object.
-        """
+        """ Return the value of the field in a given *data* object.  """
         ii = self.check_available(data)
         original_fields = data.keys() # Copy
         dd = self._function(self, data)
         dd *= self._convert_function(data)
+
         for field_name in data.keys():
             if field_name not in original_fields:
                 del data[field_name]
+
         return dd
 
     def get_source(self):
         """
         Return a string containing the source of the function (if possible.)
+
         """
         return inspect.getsource(self._function)
 
     def get_label(self, projected=False):
         """
         Return a data label for the given field, including units.
+
         """
         name = self.name
         if self.display_name is not None: name = self.display_name
         data_label = r"$\rm{%s}" % name
+
         if projected: units = self.get_projected_units()
         else: units = self.get_units()
         if units != "": data_label += r"\/\/ (%s)" % (units)
         data_label += r"$"
+
         return data_label
 
     def particle_convert(self, data):
@@ -347,9 +378,11 @@
     def __init__(self, parameters):
         """
         This validator ensures that the parameter file has a given parameter.
+
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.parameters:
@@ -362,11 +395,13 @@
 class ValidateDataField(FieldValidator):
     def __init__(self, field):
         """
-        This validator ensures that the output file has a given data field stored
-        in it.
+        This validator ensures that the output file has a given data field
+        stored in it.
+
         """
         FieldValidator.__init__(self)
         self.fields = ensure_list(field)
+
     def __call__(self, data):
         doesnt_have = []
         if isinstance(data, FieldDetector): return True
@@ -375,15 +410,19 @@
                 doesnt_have.append(f)
         if len(doesnt_have) > 0:
             raise NeedsDataField(doesnt_have)
+
         return True
 
 class ValidateProperty(FieldValidator):
     def __init__(self, prop):
         """
-        This validator ensures that the data object has a given python attribute.
+        This validator ensures that the data object has a given python
+        attribute.
+
         """
         FieldValidator.__init__(self)
         self.prop = ensure_list(prop)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.prop:
@@ -391,6 +430,7 @@
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
+
         return True
 
 class ValidateSpatial(FieldValidator):
@@ -398,13 +438,15 @@
         """
         This validator ensures that the data handed to the field is of spatial
         nature -- that is to say, 3-D.
+
         """
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __call__(self, data):
-        # When we say spatial information, we really mean
-        # that it has a three-dimensional data structure
+        # When we say spatial information, we really mean that it has a
+        # three-dimensional data structure
         #if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
@@ -417,8 +459,10 @@
         """
         This validator ensures that the data handed to the field is an actual
         grid patch, not a covering grid of any kind.
+
         """
         FieldValidator.__init__(self)
+
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True


http://bitbucket.org/yt_analysis/yt/changeset/35c5d4fa6f91/
changeset:   35c5d4fa6f91
branch:      yt
user:        MatthewTurk
date:        2011-08-26 16:45:29
summary:     Bumping version of unstable branch to 2.3dev.
affected #:  1 file (0 bytes)

--- a/setup.py	Fri Aug 26 10:29:03 2011 -0400
+++ b/setup.py	Fri Aug 26 10:45:29 2011 -0400
@@ -81,7 +81,7 @@
 
 import setuptools
 
-VERSION = "2.2dev"
+VERSION = "2.3dev"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 


http://bitbucket.org/yt_analysis/yt/changeset/ad82cfb68b54/
changeset:   ad82cfb68b54
branch:      yt
user:        caseywstark
date:        2011-08-26 21:26:16
summary:     Fixing dumb path bug (my bad)
affected #:  1 file (226 bytes)
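
The bug: os.path.abspath() on a bare filename resolves against the
current working directory, not the data folder.  The fix anchors the
parameter files next to the data folder instead.  A toy illustration
with hypothetical paths:

    import os

    path = os.path.abspath("plt00000")   # e.g. /sim/run/plt00000
    # Before: os.path.abspath("inputs") -> CWD-dependent.
    # After: resolve relative to the data folder's parent directory.
    param_path = os.path.join(os.path.dirname(path), "inputs")
    # -> /sim/run/inputs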

--- a/yt/frontends/nyx/data_structures.py	Fri Aug 26 13:57:33 2011 -0600
+++ b/yt/frontends/nyx/data_structures.py	Fri Aug 26 12:26:16 2011 -0700
@@ -548,11 +548,17 @@
         """
         self.storage_filename = storage_filename
         self.parameter_filename = param_filename
-        self.parameter_file_path = os.path.abspath(self.parameter_filename)
         self.fparameter_filename = fparam_filename
-        self.fparameter_file_path = os.path.abspath(self.fparameter_filename)
+
         self.path = os.path.abspath(plotname)  # data folder
 
+        # silly inputs and probin file thing (this is on the Nyx todo list)
+        self.parameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                self.parameter_filename)
+
+        self.fparameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                 self.fparameter_filename)
+
         self.fparameters = {}
 
         # @todo: quick fix...


http://bitbucket.org/yt_analysis/yt/changeset/73158293be42/
changeset:   73158293be42
branch:      yt
user:        MatthewTurk
date:        2011-08-26 22:02:30
summary:     Adding distribute_setup.py explicitly.
affected #:  1 file (28 bytes)

--- a/MANIFEST.in	Fri Aug 26 10:45:29 2011 -0400
+++ b/MANIFEST.in	Fri Aug 26 16:02:30 2011 -0400
@@ -1,2 +1,3 @@
+include distribute_setup.py
 recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
 recursive-include yt/ *.pyx *.pxd *.hh *.h README* 


http://bitbucket.org/yt_analysis/yt/changeset/d3bdde3ea8b1/
changeset:   d3bdde3ea8b1
branch:      yt
user:        MatthewTurk
date:        2011-08-26 22:07:14
summary:     Merge
affected #:  2 files (399 bytes)

--- a/yt/frontends/nyx/data_structures.py	Fri Aug 26 16:02:30 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Fri Aug 26 16:07:14 2011 -0400
@@ -548,11 +548,17 @@
         """
         self.storage_filename = storage_filename
         self.parameter_filename = param_filename
-        self.parameter_file_path = os.path.abspath(self.parameter_filename)
         self.fparameter_filename = fparam_filename
-        self.fparameter_file_path = os.path.abspath(self.fparameter_filename)
+
         self.path = os.path.abspath(plotname)  # data folder
 
+        # silly inputs and probin file thing (this is on the Nyx todo list)
+        self.parameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                self.parameter_filename)
+
+        self.fparameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                 self.fparameter_filename)
+
         self.fparameters = {}
 
         # @todo: quick fix...


--- a/yt/visualization/plot_modifications.py	Fri Aug 26 16:02:30 2011 -0400
+++ b/yt/visualization/plot_modifications.py	Fri Aug 26 16:07:14 2011 -0400
@@ -81,18 +81,20 @@
         else:
             xv = "%s-velocity" % (x_names[plot.data.axis])
             yv = "%s-velocity" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor, self.scale, self.scale_units)
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units)
         return qcb(plot)
 
 class MagFieldCallback(PlotCallback):
     _type_name = "magnetic_field"
-    def __init__(self, factor=16):
+    def __init__(self, factor=16, scale=None, scale_units=None):
         """
         Adds a 'quiver' plot of magnetic field to the plot, skipping all but
         every *factor* datapoint
         """
         PlotCallback.__init__(self)
         self.factor = factor
+        self.scale  = scale
+        self.scale_units = scale_units
 
     def __call__(self, plot):
         # Instantiation of these is cheap
@@ -101,12 +103,12 @@
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor)
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units)
         return qcb(plot)
 
 class QuiverCallback(PlotCallback):
     _type_name = "quiver"
-    def __init__(self, field_x, field_y, factor, scale, scale_units):
+    def __init__(self, field_x, field_y, factor, scale=None, scale_units=None):
         """
         Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints
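
How the new keywords might be exercised -- a sketch assuming the yt-2.x
PlotCollection interface and an already-loaded parameter file `pf`;
scale and scale_units pass through to matplotlib's quiver():

    from yt.mods import PlotCollection

    pc = PlotCollection(pf)
    p = pc.add_slice("Density", 0)
    # Skip all but every 16th point; pin the arrow scaling explicitly.
    p.modify["velocity"](factor=16, scale=100.0, scale_units="xy")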


http://bitbucket.org/yt_analysis/yt/changeset/53f65fd1e032/
changeset:   53f65fd1e032
branch:      yt
user:        MatthewTurk
date:        2011-08-29 00:58:31
summary:     Merging
affected #:  16 files (2.6 KB)

--- a/MANIFEST.in	Thu Aug 25 15:01:26 2011 -0400
+++ b/MANIFEST.in	Sun Aug 28 18:58:31 2011 -0400
@@ -1,2 +1,3 @@
+include distribute_setup.py
 recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
 recursive-include yt/ *.pyx *.pxd *.hh *.h README* 


--- a/doc/install_script.sh	Thu Aug 25 15:01:26 2011 -0400
+++ b/doc/install_script.sh	Sun Aug 28 18:58:31 2011 -0400
@@ -306,7 +306,7 @@
 get_enzotools mercurial-1.8.1.tar.gz
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-1.3.1.tar.gz
-get_enzotools Cython-0.14.tar.gz
+get_enzotools Cython-0.15.tar.gz
 get_enzotools Forthon-0.8.4.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
@@ -522,7 +522,7 @@
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
 do_setup_py ipython-0.10
 do_setup_py h5py-1.3.1
-do_setup_py Cython-0.14
+do_setup_py Cython-0.15
 [ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.4
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


--- a/setup.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/setup.py	Sun Aug 28 18:58:31 2011 -0400
@@ -81,7 +81,7 @@
 
 import setuptools
 
-VERSION = "2.2dev"
+VERSION = "2.3dev"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 


--- a/yt/analysis_modules/api.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/analysis_modules/api.py	Sun Aug 28 18:58:31 2011 -0400
@@ -79,7 +79,6 @@
     ExtractedParameterFile
 
 from .level_sets.api import \
-    GridConsiderationQueue, \
     coalesce_join_tree, \
     identify_contours, \
     Clump, \


--- a/yt/analysis_modules/light_cone/light_cone.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Sun Aug 28 18:58:31 2011 -0400
@@ -122,12 +122,20 @@
                                                                 deltaz_min=self.deltaz_min)
 
     def calculate_light_cone_solution(self, seed=None, filename=None):
-        """
-        Create list of projections to be added together to make the light cone.
-        :param seed (int): the seed for the random number generator.  Any light cone solution 
-               can be reproduced by giving the same random seed.  Default: None (each solution 
-               will be distinct).
-        :param filename (str): if given, a text file detailing the solution will be written out.  Default: None.
+        r"""Create list of projections to be added together to make the light cone.
+
+        Several sentences providing an extended description. Refer to
+        variables using back-ticks, e.g. `var`.
+
+        Parameters
+        ----------
+        seed : int
+            The seed for the random number generator.  Any light cone solution
+            can be reproduced by giving the same random seed.  Default: None
+            (each solution will be distinct).
+        filename : string
+            If given, a text file detailing the solution will be written out.
+            Default: None.
         """
 
         # Don't use box coherence with maximum projection depths.
@@ -209,13 +217,17 @@
             self._save_light_cone_solution(filename=filename)
 
     def get_halo_mask(self, mask_file=None, map_file=None, **kwargs):
+        r"""Gets a halo mask from a file or makes a new one.
+
+        Parameters
+        ----------
+        mask_file : string, optional
+            An HDF5 file to which to output the halo mask
+        map_file : string, optional
+            A text file to which to output the halo map (locations in the
+            images of the halos)
+
         """
-        Gets a halo mask from a file or makes a new one.
-        :param mask_file (str): specify an hdf5 file to output the halo mask.
-        :param map_file (str): specify a text file to output the halo map 
-               (locations in image of halos).
-        """
-
         # Get halo map if map_file given.
         if map_file is not None and not os.path.exists(map_file):
             light_cone_halo_map(self, map_file=map_file, **kwargs)
@@ -240,22 +252,34 @@
     def project_light_cone(self, field, weight_field=None, apply_halo_mask=False, node=None,
                            save_stack=True, save_slice_images=False, cmap_name='algae', 
                            flatten_stack=False, photon_field=False):
-        """
-        Create projections for light cone, then add them together.
-        :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
-               projections.  Default: None.
-        :param apply_halo_mask (bool): if True, a boolean mask is apply to the light cone projection.  See below for a 
-               description of halo masks.  Default: False.
-        :param node (str): a prefix to be prepended to the node name under which the projection data is serialized.  
-               Default: None.
-        :param save_stack (bool): if True, the unflatted light cone data including each individual slice is written to 
-               an hdf5 file.  Default: True.
-        :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
-        :param cmap_name (str): color map for images.  Default: 'algae'.
-        :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
-               in order to save memory.  This is generally not necessary.  Default: False.
-        :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2`, where R 
-               is the luminosity distance between the observer and the slice redshift.  Default: False.
+        r"""Create projections for light cone, then add them together.
+
+        Parameters
+        ----------
+        field : string
+            The field to project.
+        weight_field : string
+            the weight field of the projection.  This has the same meaning as
+            in standard projections.  Default: None.
+        apply_halo_mask : bool
+            if True, a boolean mask is applied to the light cone projection.  See
+            below for a description of halo masks.  Default: False.
+        node : string
+            a prefix to be prepended to the node name under which the
+            projection data is serialized.  Default: None.
+        save_stack : bool
+            if True, the unflattened light cone data including each individual
+            slice is written to an hdf5 file.  Default: True.
+        save_slice_images : bool
+            save images for each individual projection slice.  Default: False.
+        cmap_name : string
+            color map for images.  Default: 'algae'.
+        flatten_stack : bool
+            if True, the light cone stack is continually flattened each time a
+            slice is added in order to save memory.  This is generally not
+            necessary.  Default: False.
+        photon_field : bool
+            if True, the projection data for each slice is decremented by 4 Pi
+            R^2, where R is the luminosity distance between the observer and
+            the slice redshift.  Default: False.
         """
 
         # Clear projection stack.
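
For reference, a minimal usage sketch of the two methods documented above
(the LightCone instance, seed, filenames, and field are illustrative
assumptions, not part of the changeset):

    # 'lc' is an already-configured LightCone instance (construction elided).
    lc.calculate_light_cone_solution(seed=123456789, filename="lc_solution.txt")
    lc.project_light_cone("Density", weight_field=None,
                          save_stack=True, save_slice_images=False)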


--- a/yt/data_objects/derived_quantities.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Sun Aug 28 18:58:31 2011 -0400
@@ -173,9 +173,13 @@
     This function returns the location of the center
     of mass. By default, it computes the center of mass of the *non-particle* data in the object.
 
-    :param use_cells: if True, will include the cell mass (default: True)
-    :param use_particles: if True, will include the particles in the 
-    object (default: False)
+    Parameters
+    ----------
+
+    use_cells : bool
+        If True, will include the cell mass (default: True)
+    use_particles : bool
+        If True, will include the particles in the object (default: False)
     """
     x = y = z = den = 0
     if use_cells: 
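
A hedged sketch of invoking this derived quantity through the yt-2.x
quantities interface (the dataset name and sphere parameters are
illustrative):

    from yt.mods import *
    pf = load("DD0010/DD0010")  # illustrative dataset
    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    com_gas = sp.quantities["CenterOfMass"](use_cells=True, use_particles=False)
    com_all = sp.quantities["CenterOfMass"](use_cells=True, use_particles=True)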


--- a/yt/data_objects/field_info_container.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Sun Aug 28 18:58:31 2011 -0400
@@ -26,19 +26,20 @@
 """
 
 import types
-import numpy as na
 import inspect
 import copy
 import itertools
 
+import numpy as na
+
 from yt.funcs import *
 
 class FieldInfoContainer(object): # We are all Borg.
     """
     This is a generic field container.  It contains a list of potential derived
-    fields, all of which know how to act on a data object and return a value.  This
-    object handles converting units as well as validating the availability of a
-    given field.
+    fields, all of which know how to act on a data object and return a value.
+    This object handles converting units as well as validating the availability
+    of a given field.
     """
     _shared_state = {}
     _universal_field_list = {}
@@ -51,22 +52,25 @@
             return self._universal_field_list[key]
         raise KeyError
     def keys(self):
-        """
-        Return all the field names this object knows about.
-        """
+        """ Return all the field names this object knows about. """
         return self._universal_field_list.keys()
+
     def __iter__(self):
         return self._universal_field_list.iterkeys()
+
     def __setitem__(self, key, val):
         self._universal_field_list[key] = val
+
     def has_key(self, key):
         return key in self._universal_field_list
+
     def add_field(self, name, function = None, **kwargs):
         """
         Add a new field, along with supplemental metadata, to the list of
         available fields.  This respects a number of arguments, all of which
         are passed on to the constructor for
         :class:`~yt.data_objects.api.DerivedField`.
+
         """
         if function == None:
             def create_function(function):
@@ -74,6 +78,7 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
 
@@ -89,14 +94,18 @@
 class CodeFieldInfoContainer(FieldInfoContainer):
     def __setitem__(self, key, val):
         self._field_list[key] = val
+
     def __iter__(self):
         return itertools.chain(self._field_list.iterkeys(),
-                        self._universal_field_list.iterkeys())
+                               self._universal_field_list.iterkeys())
+
     def keys(self):
         return set(self._field_list.keys() + self._universal_field_list.keys())
+
     def has_key(self, key):
         return key in self._universal_field_list \
             or key in self._field_list
+
     def __getitem__(self, key):
         if key in self._field_list:
             return self._field_list[key]
@@ -111,6 +120,7 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __str__(self):
         return "(%s, %s)" % (self.ghost_zones, self.fields)
 
@@ -121,18 +131,21 @@
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
+
     def __str__(self):
         return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
+
     def __str__(self):
         return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
+
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
@@ -141,18 +154,19 @@
     NumberOfParticles = 1
     _read_exception = None
     _id_offset = 0
+
     def __init__(self, nd = 16, pf = None, flat = False):
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
-        self.ActiveDimensions = [nd,nd,nd]
-        self.LeftEdge = [0.0,0.0,0.0]
-        self.RightEdge = [1.0,1.0,1.0]
+        self.ActiveDimensions = [nd, nd, nd]
+        self.LeftEdge = [0.0, 0.0, 0.0]
+        self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = na.ones(3, "float64")
         self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
-        if pf is None:
+        if pf is None:  # setup defaults
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
@@ -168,17 +182,18 @@
             io = fake_io()
             def get_smallest_dx(self):
                 return 1.0
+
         self.hierarchy = fake_hierarchy()
         self.requested = []
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd,nd,nd), dtype='float64')
-                + 1e-4*na.random.random((nd,nd,nd)))
+                lambda: na.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*na.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd*nd*nd), dtype='float64')
-                + 1e-4*na.random.random((nd*nd*nd)))
+                lambda: na.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*na.random.random((nd * nd * nd)))
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(item) and \
@@ -190,15 +205,20 @@
                 nfd = FieldDetector(self.nd+ngz*2)
                 nfd._num_ghost_zones = ngz
                 vv = FI[item](nfd)
-                if ngz > 0: vv = vv[ngz:-ngz,ngz:-ngz,ngz:-ngz]
+                if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
+
                 for i in nfd.requested:
                     if i not in self.requested: self.requested.append(i)
+
                 for i in nfd.requested_parameters:
-                    if i not in self.requested_parameters: self.requested_parameters.append(i)
+                    if i not in self.requested_parameters:
+                        self.requested_parameters.append(i)
+
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
                 return self[item]
+
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
@@ -249,23 +269,29 @@
         :param display_name: a name used in the plots
         :param projection_conversion: which unit should we multiply by in a
                                       projection?
+
         """
         self.name = name
         self._function = function
+
         if validators:
             self.validators = ensure_list(validators)
         else:
             self.validators = []
+
         self.take_log = take_log
         self._units = units
         self._projected_units = projected_units
+
         if not convert_function:
             convert_function = lambda a: 1.0
         self._convert_function = convert_function
         self._particle_convert_function = particle_convert_function
+
         self.particle_type = particle_type
         self.vector_field = vector_field
         self.projection_conversion = projection_conversion
+
         self.display_field = display_field
         self.display_name = display_name
         self.not_in_all = not_in_all
@@ -274,6 +300,7 @@
         """
         This raises an exception of the appropriate type if the set of
         validation mechanisms are not met, and otherwise returns True.
+
         """
         for validator in self.validators:
             validator(data)
@@ -283,6 +310,7 @@
     def get_dependencies(self, *args, **kwargs):
         """
         This returns a list of names of fields that this field depends on.
+
         """
         e = FieldDetector(*args, **kwargs)
         if self._function.func_name == '<lambda>':
@@ -292,47 +320,50 @@
         return e
 
     def get_units(self):
-        """
-        Return a string describing the units.
-        """
+        """ Return a string describing the units.  """
         return self._units
 
     def get_projected_units(self):
         """
         Return a string describing the units if the field has been projected.
+
         """
         return self._projected_units
 
     def __call__(self, data):
-        """
-        Return the value of the field in a given *data* object.
-        """
+        """ Return the value of the field in a given *data* object.  """
         ii = self.check_available(data)
         original_fields = data.keys() # Copy
         dd = self._function(self, data)
         dd *= self._convert_function(data)
+
         for field_name in data.keys():
             if field_name not in original_fields:
                 del data[field_name]
+
         return dd
 
     def get_source(self):
         """
         Return a string containing the source of the function (if possible.)
+
         """
         return inspect.getsource(self._function)
 
     def get_label(self, projected=False):
         """
         Return a data label for the given field, including units.
+
         """
         name = self.name
         if self.display_name is not None: name = self.display_name
         data_label = r"$\rm{%s}" % name
+
         if projected: units = self.get_projected_units()
         else: units = self.get_units()
         if units != "": data_label += r"\/\/ (%s)" % (units)
         data_label += r"$"
+
         return data_label
 
     def particle_convert(self, data):
@@ -347,9 +378,11 @@
     def __init__(self, parameters):
         """
         This validator ensures that the parameter file has a given parameter.
+
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.parameters:
@@ -362,11 +395,13 @@
 class ValidateDataField(FieldValidator):
     def __init__(self, field):
         """
-        This validator ensures that the output file has a given data field stored
-        in it.
+        This validator ensures that the output file has a given data field
+        stored in it.
+
         """
         FieldValidator.__init__(self)
         self.fields = ensure_list(field)
+
     def __call__(self, data):
         doesnt_have = []
         if isinstance(data, FieldDetector): return True
@@ -375,15 +410,19 @@
                 doesnt_have.append(f)
         if len(doesnt_have) > 0:
             raise NeedsDataField(doesnt_have)
+
         return True
 
 class ValidateProperty(FieldValidator):
     def __init__(self, prop):
         """
-        This validator ensures that the data object has a given python attribute.
+        This validator ensures that the data object has a given python
+        attribute.
+
         """
         FieldValidator.__init__(self)
         self.prop = ensure_list(prop)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.prop:
@@ -391,6 +430,7 @@
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
+
         return True
 
 class ValidateSpatial(FieldValidator):
@@ -398,13 +438,15 @@
         """
         This validator ensures that the data handed to the field is of spatial
         nature -- that is to say, 3-D.
+
         """
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __call__(self, data):
-        # When we say spatial information, we really mean
-        # that it has a three-dimensional data structure
+        # When we say spatial information, we really mean that it has a
+        # three-dimensional data structure
         #if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
@@ -417,8 +459,10 @@
         """
         This validator ensures that the data handed to the field is an actual
         grid patch, not a covering grid of any kind.
+
         """
         FieldValidator.__init__(self)
+
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
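
As a reference point for the add_field interface touched above, a minimal
registration sketch (the field name, formula, and units are illustrative;
add_field is assumed to be in scope via the usual yt-2.x star import):

    from yt.mods import *

    def _DensitySquared(field, data):
        return data["Density"]**2
    add_field("DensitySquared", function=_DensitySquared,
              units=r"\rm{g}^2/\rm{cm}^6")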


--- a/yt/data_objects/object_finding_mixin.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/data_objects/object_finding_mixin.py	Sun Aug 28 18:58:31 2011 -0400
@@ -118,7 +118,7 @@
         as the input *fields*.
         
         Parameters
-        ---------
+        ----------
         fields : string or list of strings
             The field(s) that will be returned.
         


--- a/yt/frontends/enzo/data_structures.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/frontends/enzo/data_structures.py	Sun Aug 28 18:58:31 2011 -0400
@@ -684,9 +684,9 @@
         self._hierarchy_class = EnzoHierarchy1D
         self._fieldinfo_class = Enzo1DFieldContainer
         self.domain_left_edge = \
-            na.concatenate([self["DomainLeftEdge"], [0.0, 0.0]])
+            na.concatenate([[self.domain_left_edge], [0.0, 0.0]])
         self.domain_right_edge = \
-            na.concatenate([self["DomainRightEdge"], [1.0, 1.0]])
+            na.concatenate([[self.domain_right_edge], [1.0, 1.0]])
 
     def _setup_2d(self):
         self._hierarchy_class = EnzoHierarchy2D
@@ -796,8 +796,13 @@
         self.refine_by = self.parameters["RefineBy"]
         self.dimensionality = self.parameters["TopGridRank"]
         self.domain_dimensions = self.parameters["TopGridDimensions"]
-        self.domain_left_edge = self.parameters["DomainLeftEdge"].copy()
-        self.domain_right_edge = self.parameters["DomainRightEdge"].copy()
+        if self.dimensionality > 1:
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"]).copy()
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"]).copy()
+        else:
+            self.domain_left_edge = na.array(self.parameters["DomainLeftEdge"])
+            self.domain_right_edge = na.array(self.parameters["DomainRightEdge"])
+
         self.current_time = self.parameters["InitialTime"]
         # To be enabled when we can break old pickles:
         #if "MetaDataSimulationUUID" in self.parameters:
@@ -835,7 +840,7 @@
             self._setup_nounits_units()
         self.time_units['1'] = 1
         self.units['1'] = 1
-        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         seconds = self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)


--- a/yt/frontends/nyx/data_structures.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Sun Aug 28 18:58:31 2011 -0400
@@ -548,11 +548,17 @@
         """
         self.storage_filename = storage_filename
         self.parameter_filename = param_filename
-        self.parameter_file_path = os.path.abspath(self.parameter_filename)
         self.fparameter_filename = fparam_filename
-        self.fparameter_file_path = os.path.abspath(self.fparameter_filename)
+
         self.path = os.path.abspath(plotname)  # data folder
 
+        # silly inputs and probin file thing (this is on the Nyx todo list)
+        self.parameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                self.parameter_filename)
+
+        self.fparameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                 self.fparameter_filename)
+
         self.fparameters = {}
 
         # @todo: quick fix...


--- a/yt/frontends/orion/data_structures.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Sun Aug 28 18:58:31 2011 -0400
@@ -444,16 +444,12 @@
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='orion_native', paranoia=False,
                  storage_filename = None):
-        """need to override for Orion file structure.
-
-        the paramfile is usually called "inputs"
+        """
+        The paramfile is usually called "inputs"
         and there may be a fortran inputs file, usually called "probin";
         plotname here will be a directory name.
-        as per BoxLib, data_style will be one of
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
+        As per BoxLib, data_style will be Native (implemented here), IEEE (not
+        yet implemented), or ASCII (not yet implemented).
         """
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia


--- a/yt/frontends/ramses/data_structures.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Sun Aug 28 18:58:31 2011 -0400
@@ -330,6 +330,14 @@
                                            * rheader['boxlen']
         self.domain_left_edge = na.zeros(3, dtype='float64')
         self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        # This is likely not true, but I am not sure how to otherwise
+        # distinguish them.
+        mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")
+        self.cosmological_simulation = 1
+        self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+        self.omega_lambda = rheader["omega_l"]
+        self.omega_matter = rheader["omega_m"]
+        self.hubble_constant = rheader["H0"]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
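
A one-line sanity check of the expansion-factor relation used above, with
an illustrative value standing in for rheader["aexp"]:

    aexp = 0.25
    current_redshift = (1.0 / aexp) - 1.0  # -> 3.0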


--- a/yt/funcs.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/funcs.py	Sun Aug 28 18:58:31 2011 -0400
@@ -137,17 +137,9 @@
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
-    """
+    r"""
     Decorator for seeing how long a given function takes, depending on whether
     or not the global 'yt.timefunctions' config parameter is set.
-
-    This can be used like so:
-
-    .. code-block:: python
-
-       @time_execution
-    def some_longrunning_function(...):
-
     """
     @wraps(func)
     def wrapper(*arg, **kw):


--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Sun Aug 28 18:58:31 2011 -0400
@@ -289,18 +289,25 @@
         if le is None:
             self.domain_left_edge = pf.domain_left_edge
         else:
-            self.domain_left_edge = na.clip(na.array(le),pf.domain_left_edge, pf.domain_right_edge)
+            self.domain_left_edge = na.array(le)
         if re is None:
             self.domain_right_edge = pf.domain_right_edge
         else:
-            self.domain_right_edge = na.clip(na.array(re),pf.domain_left_edge, pf.domain_right_edge)
+            self.domain_right_edge = na.array(re)
 
+        root_grids = pf.hierarchy.get_levels().next()
+        rgdds = root_grids[0].dds
+        self.domain_left_edge = (self.domain_left_edge/rgdds).astype('int64')*rgdds
+        self.domain_right_edge = ((self.domain_right_edge/rgdds).astype('int64')+1)*rgdds
+
+        self.domain_left_edge = na.clip(self.domain_left_edge,pf.domain_left_edge, pf.domain_right_edge)
+        self.domain_right_edge = na.clip(self.domain_right_edge,pf.domain_left_edge, pf.domain_right_edge)
+        
         self.my_l_corner = self.domain_left_edge
         self.my_r_corner = self.domain_right_edge
 
         mylog.info('Making kd tree from le %s to %s'% (self.domain_left_edge, self.domain_right_edge))
-        root_grids = pf.hierarchy.get_levels().next()
-
+        
         root_l_data = na.array([grid.LeftEdge for grid in root_grids])
         root_r_data = na.array([grid.RightEdge for grid in root_grids])
         root_we_want = na.all(root_l_data < self.my_r_corner,axis=1)*\


--- a/yt/utilities/command_line.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/utilities/command_line.py	Sun Aug 28 18:58:31 2011 -0400
@@ -330,13 +330,44 @@
         cmdln.Cmdln.__init__(self, *args, **kwargs)
         cmdln.Cmdln.do_help.aliases.append("h")
 
-    def do_loop(self, subcmd, opts, *args):
+    def do_update(self, subcmd, opts):
         """
-        Interactive loop
+        Update the yt installation to the most recent version
 
+        ${cmd_usage}
         ${cmd_option_list}
         """
-        self.cmdloop()
+        import pkg_resources
+        yt_provider = pkg_resources.get_provider("yt")
+        path = os.path.dirname(yt_provider.module_path)
+        print
+        print "yt module located at:"
+        print "    %s" % (path)
+        update_supp = False
+        if "YT_DEST" in os.environ:
+            spath = os.path.join(
+                     os.environ["YT_DEST"], "src", "yt-supplemental")
+            if os.path.isdir(spath):
+                print "The supplemental repositories are located at:"
+                print "    %s" % (spath)
+                update_supp = True
+        vstring = None
+        if "site-packages" not in path:
+            vstring = _get_hg_version(path)
+            print
+            print "The current version of the code is:"
+            print
+            print "---"
+            print vstring.strip()
+            print "---"
+            print
+            print "This installation CAN be automatically updated."
+            print "Updated successfully."
+        else:
+            print
+            print "YT site-packages not in path, so you must"
+            print "update this installation manually."
+            print
 
     @cmdln.option("-u", "--update-source", action="store_true",
                   default = False,
@@ -346,7 +377,7 @@
                   help="File into which the current revision number will be stored")
     def do_instinfo(self, subcmd, opts):
         """
-        ${cmd_name}: Get some information about the yt installation
+        Get some information about the yt installation
 
         ${cmd_usage}
         ${cmd_option_list}
@@ -388,7 +419,7 @@
 
     def do_load(self, subcmd, opts, arg):
         """
-        Load a single dataset into an IPython instance.
+        Load a single dataset into an IPython instance
 
         ${cmd_option_list}
         """
@@ -427,7 +458,7 @@
                       'halos','halo_hop_style','halo_radius','halo_radius_units'])
     def do_halos(self, subcmd, opts, arg):
         """
-        Run HaloProfiler on one dataset.
+        Run HaloProfiler on one dataset
 
         ${cmd_option_list}
         """
@@ -442,55 +473,6 @@
         if opts.make_projections:
             hp.make_projections()
 
-    @add_cmd_options(["maxw", "minw", "proj", "axis", "field", "weight",
-                      "zlim", "nframes", "output", "cmap", "uboxes", "dex",
-                      "text"])
-    def do_zoomin(self, subcmd, opts, arg):
-        """
-        Create a set of zoomin frames
-
-        ${cmd_option_list}
-        """
-        pf = _fix_pf(arg)
-        min_width = opts.min_width * pf.h.get_smallest_dx()
-        if opts.axis == 4:
-            axes = range(3)
-        else:
-            axes = [opts.axis]
-        pc = PlotCollection(pf)
-        for ax in axes: 
-            if opts.projection: p = pc.add_projection(opts.field, ax,
-                                    weight_field=opts.weight)
-            else: p = pc.add_slice(opts.field, ax)
-            if opts.unit_boxes: p.modify["units"](factor=8)
-            if opts.text is not None:
-                p.modify["text"](
-                    (0.02, 0.05), opts.text.replace(r"\n", "\n"),
-                    text_args=dict(size="medium", color="w"))
-        pc.set_width(opts.max_width,'1')
-        # Check the output directory
-        if not os.path.isdir(opts.output):
-            os.mkdir(opts.output)
-        # Figure out our zoom factor
-        # Recall that factor^nframes = min_width / max_width
-        # so factor = (log(min/max)/log(nframes))
-        mylog.info("min_width: %0.3e max_width: %0.3e nframes: %0.3e",
-                   min_width, opts.max_width, opts.nframes)
-        factor=10**(math.log10(min_width/opts.max_width)/opts.nframes)
-        mylog.info("Zoom factor: %0.3e", factor)
-        w = 1.0
-        for i in range(opts.nframes):
-            mylog.info("Setting width to %0.3e", w)
-            mylog.info("Saving frame %06i",i)
-            pc.set_width(w,"1")
-            if opts.zlim:
-                pc.set_zlim(*opts.zlim)
-            if opts.dex:
-                pc.set_zlim('min', None, opts.dex)
-            pc.set_cmap(opts.cmap)
-            pc.save(os.path.join(opts.output,"%s_frame%06i" % (pf,i)))
-            w = factor**i
-
     @add_cmd_options(["width", "unit", "bn", "proj", "center",
                       "zlim", "axis", "field", "weight", "skip",
                       "cmap", "output", "grids", "time"])
@@ -600,7 +582,7 @@
                 "%s (%0.5e years): %0.5e at %s\n" % (pf, t, v, c))
 
     @add_cmd_options([])
-    def do_analyze(self, subcmd, opts, arg):
+    def _do_analyze(self, subcmd, opts, arg):
         """
         Produce a set of analyses for a given output.  This includes
         HaloProfiler results with r200, as per the recipe file in the cookbook,
@@ -692,7 +674,7 @@
                   help="File to output to; else, print.")
     def do_pastebin(self, subcmd, opts, arg):
         """
-        Post a script to an anonymous pastebin.
+        Post a script to an anonymous pastebin
 
         Usage: yt pastebin [options] <script>
 
@@ -721,7 +703,7 @@
                   help="File to output to; else, print.")
     def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
         """
-        Download from your or another user's pasteboard.
+        Download from your or another user's pasteboard
 
         ${cmd_usage} 
         ${cmd_option_list}
@@ -1157,7 +1139,7 @@
                   help="Add a debugging mode for cell execution")
     def do_serve(self, subcmd, opts):
         """
-        Run the Web GUI
+        Run Reason, the yt Web GUI
         """
         # We have to do a couple things.
         # First, we check that YT_DEST is set.
@@ -1208,7 +1190,7 @@
                     port=int(opts.port), repl=hr)
 
     
-    def do_remote(self, subcmd, opts):
+    def _do_remote(self, subcmd, opts):
         import getpass, sys, socket, time, webbrowser
         import yt.utilities.pexpect as pex
 
@@ -1427,7 +1409,44 @@
         req = urllib2.Request(uri, data)
         rv = urllib2.urlopen(req).read()
         print rv
-    
+
+    def do_upload_image(self, subcmd, opts, filename):
+        """
+        Upload an image to imgur.com.  Must be PNG.
+
+        ${cmd_usage} 
+        ${cmd_option_list}
+        """
+        if not filename.endswith(".png"):
+            print "File must be a PNG file!"
+            return 1
+        import base64, json, pprint
+        image_data = base64.b64encode(open(filename).read())
+        api_key = 'f62d550859558f28c4c214136bc797c7'
+        parameters = {'key': api_key, 'image': image_data, 'type': 'base64',
+                      'caption': "",
+                      'title': "%s uploaded by yt" % filename}
+        data = urllib.urlencode(parameters)
+        req = urllib2.Request('http://api.imgur.com/2/upload.json', data)
+        try:
+            response = urllib2.urlopen(req).read()
+        except urllib2.HTTPError as e:
+            print "ERROR", e
+            return {'uploaded':False}
+        rv = json.loads(response)
+        if 'upload' in rv and 'links' in rv['upload']:
+            print
+            print "Image successfully uploaded!  You can find it at:"
+            print "    %s" % (rv['upload']['links']['imgur_page'])
+            print
+            print "If you'd like to delete it, visit this page:"
+            print "    %s" % (rv['upload']['links']['delete_page'])
+            print
+        else:
+            print
+            print "Something has gone wrong!  Here is the server response:"
+            print
+            pprint.pprint(rv)
 
 def run_main():
     for co in ["--parallel", "--paste"]:


--- a/yt/visualization/plot_modifications.py	Thu Aug 25 15:01:26 2011 -0400
+++ b/yt/visualization/plot_modifications.py	Sun Aug 28 18:58:31 2011 -0400
@@ -81,18 +81,20 @@
         else:
             xv = "%s-velocity" % (x_names[plot.data.axis])
             yv = "%s-velocity" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor, self.scale, self.scale_units)
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units)
         return qcb(plot)
 
 class MagFieldCallback(PlotCallback):
     _type_name = "magnetic_field"
-    def __init__(self, factor=16):
+    def __init__(self, factor=16, scale=None, scale_units=None):
         """
         Adds a 'quiver' plot of magnetic field to the plot, skipping all but
         every *factor* datapoint
         """
         PlotCallback.__init__(self)
         self.factor = factor
+        self.scale  = scale
+        self.scale_units = scale_units
 
     def __call__(self, plot):
         # Instantiation of these is cheap
@@ -101,12 +103,12 @@
         else:
             xv = "B%s" % (x_names[plot.data.axis])
             yv = "B%s" % (y_names[plot.data.axis])
-            qcb = QuiverCallback(xv, yv, self.factor)
+            qcb = QuiverCallback(xv, yv, self.factor, scale=self.scale, scale_units=self.scale_units)
         return qcb(plot)
 
 class QuiverCallback(PlotCallback):
     _type_name = "quiver"
-    def __init__(self, field_x, field_y, factor, scale, scale_units):
+    def __init__(self, field_x, field_y, factor, scale=None, scale_units=None):
         """
         Adds a 'quiver' plot to any plot, using the *field_x* and *field_y*
         from the associated data, skipping every *factor* datapoints
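
A hedged sketch of exercising the new keyword pass-through from a plot
collection (dataset name, fields, and scale values are illustrative):

    from yt.mods import *
    pf = load("DD0010/DD0010")  # illustrative dataset
    pc = PlotCollection(pf)
    p = pc.add_slice("Density", 0)
    p.modify["quiver"]("x-velocity", "y-velocity", 16,
                       scale=None, scale_units=None)
    p.modify["magnetic_field"](factor=16, scale=4.0, scale_units="inches")
    pc.save("quiver_demo")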


http://bitbucket.org/yt_analysis/yt/changeset/926c306aaea7/
changeset:   926c306aaea7
branch:      yt
user:        MatthewTurk
date:        2011-08-29 01:08:37
summary:     Updating to distribute 0.6.21.
affected #:  1 file (0 bytes)

--- a/distribute_setup.py	Sun Aug 28 18:58:31 2011 -0400
+++ b/distribute_setup.py	Sun Aug 28 19:08:37 2011 -0400
@@ -46,7 +46,7 @@
             args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.14"
+DEFAULT_VERSION = "0.6.21"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"
 


http://bitbucket.org/yt_analysis/yt/changeset/39339f71664a/
changeset:   39339f71664a
branch:      yt
user:        MatthewTurk
date:        2011-08-30 00:00:26
summary:     Adding a domain boundary function wrapper to RAMSES.
affected #:  1 file (240 bytes)

--- a/yt/frontends/ramses/_ramses_reader.pyx	Sun Aug 28 19:08:37 2011 -0400
+++ b/yt/frontends/ramses/_ramses_reader.pyx	Mon Aug 29 18:00:26 2011 -0400
@@ -401,6 +401,7 @@
         self.hydro_datas = <RAMSES_hydro_data ***>\
                        malloc(sizeof(RAMSES_hydro_data**) * self.rsnap.m_header.ncpu)
         self.ndomains = self.rsnap.m_header.ncpu
+        
         # Note we don't do ncpu + 1
         for idomain in range(self.rsnap.m_header.ncpu):
             # we don't delete local_tree
@@ -435,6 +436,13 @@
             self.field_ind[self.field_names[-1]] = ifield
         # This all needs to be cleaned up in the deallocator
 
+    def get_domain_boundaries(self):
+        bounds = []
+        for i in range(self.rsnap.m_header.ncpu):
+            bounds.append((self.rsnap.ind_min[i],
+                           self.rsnap.ind_max[i]))
+        return bounds
+
     def __dealloc__(self):
         cdef int idomain, ifield
         # To ensure that 'delete' is used, not 'free',
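
A hedged usage sketch of the new wrapper (construction of the reader is
elided; 'reader' stands in for the tree-proxy instance this method is
defined on):

    for dom, (ind_min, ind_max) in enumerate(reader.get_domain_boundaries()):
        print "domain %i: indices %s to %s" % (dom, ind_min, ind_max)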


http://bitbucket.org/yt_analysis/yt/changeset/306ae990a4fb/
changeset:   306ae990a4fb
branch:      yt
user:        MatthewTurk
date:        2011-08-30 00:41:57
summary:     Adding a note about no-old-results to the test runner.
affected #:  1 file (94 bytes)

--- a/yt/utilities/answer_testing/runner.py	Mon Aug 29 18:00:26 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Mon Aug 29 18:41:57 2011 -0400
@@ -129,8 +129,11 @@
         self.plot_list[test.name] = test.plot()
         self.results[test.name] = test.result
         success, msg = self._compare(test)
-        if success == True: print "SUCCEEDED"
-        else: print "FAILED"
+        if self.old_results is None:
+            print "NO OLD RESULTS"
+        else:
+            if success == True: print "SUCCEEDED"
+            else: print "FAILED"
         self.passed_tests[test.name] = success
         if self.watcher is not None:
             if success == True:


http://bitbucket.org/yt_analysis/yt/changeset/dcaf3a69da42/
changeset:   dcaf3a69da42
branch:      yt
user:        MatthewTurk
date:        2011-08-30 00:50:17
summary:     Initial attempt to split RAMSES protosubgrids based on domains; required
addition of OverlappingSiblings attribute for grids.  Still getting a bit of a
mismatch in grid locations versus the domain indices, which results in a
not-quite-1.0 projection of Ones everywhere, but otherwise this should
greatly improve the IO.  Will attempt to fix the projection issue shortly.
affected #:  3 files (1.3 KB)

--- a/yt/data_objects/grid_patch.py	Mon Aug 29 18:41:57 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Mon Aug 29 18:50:17 2011 -0400
@@ -47,6 +47,7 @@
     _type_name = 'grid'
     _skip_add = True
     _con_args = ('id', 'filename')
+    OverlappingSiblings = None
 
     __slots__ = ['data', 'field_parameters', 'id', 'hierarchy', 'pf',
                  'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
@@ -366,7 +367,7 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by
+        rf = self.pf.refine_by**(child.Level - self.Level)
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi/rf - gi)
         endIndex = na.minimum( (cgi+child.ActiveDimensions)/rf - gi,
@@ -384,6 +385,10 @@
         self._child_mask = na.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
+        if self.OverlappingSiblings is not None:
+            for sibling in self.OverlappingSiblings:
+                self.__fill_child_mask(sibling, self._child_mask, 0)
+        
         self._child_indices = (self._child_mask==0) # bool, possibly redundant
 
     def __generate_child_index_mask(self):
@@ -395,6 +400,10 @@
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
+        if self.OverlappingSiblings is not None:
+            for sibling in self.OverlappingSiblings:
+                self.__fill_child_mask(sibling, self._child_index_mask,
+                                       sibling.id)
 
     def _get_coords(self):
         if self.__coords == None: self._generate_coords()


--- a/yt/frontends/ramses/data_structures.py	Mon Aug 29 18:41:57 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Mon Aug 29 18:50:17 2011 -0400
@@ -189,16 +189,20 @@
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
-            for dleft_index, dfl in zip(lefts, locs):
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                self.num_deep = 0
-                psgs.extend(_ramses_reader.recursive_patch_splitting(
-                    psg, idims, initial_left, 
-                    dleft_index, dfl))
+            for ddleft_index, ddfl in zip(lefts, locs):
+                for idomain in na.unique(ddfl[:,0]):
+                    dom_ind = ddfl[:,0] == idomain
+                    dleft_index = ddleft_index[dom_ind,:]
+                    dfl = ddfl[dom_ind,:]
+                    initial_left = na.min(dleft_index, axis=0)
+                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    self.num_deep = 0
+                    psgs.extend(_ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl))
             mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
@@ -243,6 +247,17 @@
             if len(parents) > 0:
                 g.Parent.extend(parents.tolist())
                 for p in parents: p.Children.append(g)
+            # Now we do overlapping siblings; note that one has to "win" with
+            # siblings, so we assume the lower ID one will "win"
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
         self.max_level = self.grid_levels.max()


--- a/yt/utilities/_amr_utils/misc_utilities.pyx	Mon Aug 29 18:41:57 2011 -0400
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx	Mon Aug 29 18:50:17 2011 -0400
@@ -63,12 +63,13 @@
                         np.ndarray[np.float64_t, ndim=2] left_edges,
                         np.ndarray[np.float64_t, ndim=2] right_edges,
                         np.ndarray[np.int32_t, ndim=2] levels,
-                        np.ndarray[np.int32_t, ndim=1] mask):
+                        np.ndarray[np.int32_t, ndim=1] mask,
+                        int min_index = 0):
     cdef int i, n
     cdef int nx = left_edges.shape[0]
     cdef int inside 
     for i in range(nx):
-        if levels[i,0] != level:
+        if i < min_index or levels[i,0] != level:
             mask[i] = 0
             continue
         inside = 1
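
The new min_index argument simply excludes all grids whose index falls
below the cutoff, regardless of level; an isolated illustration in plain
numpy (values illustrative):

    import numpy as np
    levels = np.array([0, 1, 1, 1, 1], dtype="int32")
    mask = np.ones(5, dtype="int32")
    level, min_index = 1, 2
    for i in range(5):
        if i < min_index or levels[i] != level:
            mask[i] = 0
    # mask is now [0, 0, 1, 1, 1]: grids below min_index never match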


http://bitbucket.org/yt_analysis/yt/changeset/f79b6f9bcba3/
changeset:   f79b6f9bcba3
branch:      yt
user:        MatthewTurk
date:        2011-08-30 19:40:42
summary:     Merge
affected #:  1 file (416 bytes)

--- a/yt/utilities/amr_kdtree/amr_kdtree.py	Mon Aug 29 18:50:17 2011 -0400
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py	Tue Aug 30 13:40:42 2011 -0400
@@ -789,6 +789,24 @@
                 v += na.prod(node.r_corner - node.l_corner)
         return v
 
+    def count_cells(self):
+        r"""Calculates the numbers of cells of the kd-Tree
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        ----------
+        Total volume of the tree.
+        
+        """
+        c = na.int64(0)
+        for node in self.depth_traverse():
+            if node.grid is not None:
+                c += na.prod(node.ri - node.li).astype('int64')
+        return c
+
     def _build(self, grids, parent, l_corner, r_corner):
         r"""Builds the AMR kd-Tree
 


http://bitbucket.org/yt_analysis/yt/changeset/27eaff08b6cb/
changeset:   27eaff08b6cb
branch:      yt
user:        MatthewTurk
date:        2011-08-30 20:31:18
summary:     Initial attempt at preload definition for RAMSES data, which should enable semi-parallel work.
affected #:  3 files (1.0 KB)

--- a/yt/frontends/ramses/_ramses_reader.pyx	Tue Aug 30 13:40:42 2011 -0400
+++ b/yt/frontends/ramses/_ramses_reader.pyx	Tue Aug 30 14:31:18 2011 -0400
@@ -512,12 +512,13 @@
         # We delete and re-create
         cdef int varindex = self.field_ind[varname]
         cdef string *field_name = new string(varname)
-        if self.loaded[domain_index][varindex] == 0: return
-        cdef RAMSES_hydro_data *temp_hdata = self.hydro_datas[domain_index][varindex]
-        del temp_hdata
-        self.hydro_datas[domain_index - 1][varindex] = \
-            new RAMSES_hydro_data(deref(self.trees[domain_index]))
-        self.loaded[domain_index][varindex] = 0
+        cdef RAMSES_hydro_data *temp_hdata
+        if self.loaded[domain_index][varindex] == 1:
+            temp_hdata = self.hydro_datas[domain_index][varindex]
+            del temp_hdata
+            self.hydro_datas[domain_index][varindex] = \
+                new RAMSES_hydro_data(deref(self.trees[domain_index]))
+            self.loaded[domain_index][varindex] = 0
         del field_name
 
     def get_file_info(self):


--- a/yt/frontends/ramses/data_structures.py	Tue Aug 30 13:40:42 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Tue Aug 30 14:31:18 2011 -0400
@@ -65,6 +65,7 @@
         self.Parent = []
         self.Children = []
         self.locations = locations
+        self.domain = locations[0,0]
         self.start_index = start_index.copy()
 
     def _setup_dx(self):
@@ -245,7 +246,8 @@
                                 self.grid_levels, mask)
             parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
             # Now we do overlapping siblings; note that one has to "win" with
             # siblings, so we assume the lower ID one will "win"


--- a/yt/frontends/ramses/io.py	Tue Aug 30 13:40:42 2011 -0400
+++ b/yt/frontends/ramses/io.py	Tue Aug 30 14:31:18 2011 -0400
@@ -23,10 +23,12 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from collections import defaultdict
 import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
 
 class IOHandlerRAMSES(BaseIOHandler):
     _data_style = "ramses"
@@ -58,3 +60,21 @@
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+    def preload(self, grids, sets):
+        if len(grids) == 0: return
+        domain_keys = defaultdict(list)
+        pf_field_list = grids[0].pf.h.field_list
+        sets = [dset for dset in list(sets) if dset in pf_field_list]
+        exc = self._read_exception
+        for g in grids:
+            domain_keys[g.domain].append(g)
+        for domain, grids in domain_keys.items():
+            mylog.debug("Starting read of domain %s (%s)", domain, sets)
+            for field in sets:
+                for g in grids:
+                    self.queue[g.id][field] = self._read_data_set(g, field)
+                print "Clearing", field, domain
+                self.ramses_tree.clear_tree(field, domain - 1)
+        mylog.debug("Finished read of %s", sets)
+
+    def modify(self, data): return data
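
A hedged sketch of the call pattern preload is written for (the io handler
hangs off the hierarchy in yt-2.x; field names are illustrative):

    pf.h.io.preload(pf.h.grids, ["Density", "x-velocity"])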


http://bitbucket.org/yt_analysis/yt/changeset/fa80ff395f07/
changeset:   fa80ff395f07
branch:      yt
user:        MatthewTurk
date:        2011-09-02 16:20:42
summary:     Merging
affected #:  19 files (17.1 KB)

--- a/MANIFEST.in	Thu Sep 01 16:07:42 2011 -0400
+++ b/MANIFEST.in	Fri Sep 02 10:20:42 2011 -0400
@@ -1,2 +1,3 @@
+include distribute_setup.py
 recursive-include yt/gui/reason/html/ *.html *.png *.ico *.js
 recursive-include yt/ *.pyx *.pxd *.hh *.h README* 


--- a/distribute_setup.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/distribute_setup.py	Fri Sep 02 10:20:42 2011 -0400
@@ -46,7 +46,7 @@
             args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.14"
+DEFAULT_VERSION = "0.6.21"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"
 


--- a/doc/install_script.sh	Thu Sep 01 16:07:42 2011 -0400
+++ b/doc/install_script.sh	Fri Sep 02 10:20:42 2011 -0400
@@ -306,7 +306,7 @@
 get_enzotools mercurial-1.8.1.tar.gz
 get_enzotools ipython-0.10.tar.gz
 get_enzotools h5py-1.3.1.tar.gz
-get_enzotools Cython-0.14.tar.gz
+get_enzotools Cython-0.15.tar.gz
 get_enzotools Forthon-0.8.4.tar.gz
 get_enzotools ext-3.3.2.zip
 get_enzotools ext-slate-110328.zip
@@ -522,7 +522,7 @@
 [ -n "${OLD_CFLAGS}" ] && export CFLAGS=${OLD_CFLAGS}
 do_setup_py ipython-0.10
 do_setup_py h5py-1.3.1
-do_setup_py Cython-0.14
+do_setup_py Cython-0.15
 [ $INST_FORTHON -eq 1 ] && do_setup_py Forthon-0.8.4
 
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"


--- a/setup.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/setup.py	Fri Sep 02 10:20:42 2011 -0400
@@ -81,7 +81,7 @@
 
 import setuptools
 
-VERSION = "2.2dev"
+VERSION = "2.3dev"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 


--- a/yt/analysis_modules/api.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/analysis_modules/api.py	Fri Sep 02 10:20:42 2011 -0400
@@ -79,7 +79,6 @@
     ExtractedParameterFile
 
 from .level_sets.api import \
-    GridConsiderationQueue, \
     coalesce_join_tree, \
     identify_contours, \
     Clump, \


--- a/yt/analysis_modules/light_cone/light_cone.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Fri Sep 02 10:20:42 2011 -0400
@@ -122,12 +122,20 @@
                                                                 deltaz_min=self.deltaz_min)
 
     def calculate_light_cone_solution(self, seed=None, filename=None):
-        """
-        Create list of projections to be added together to make the light cone.
-        :param seed (int): the seed for the random number generator.  Any light cone solution 
-               can be reproduced by giving the same random seed.  Default: None (each solution 
-               will be distinct).
-        :param filename (str): if given, a text file detailing the solution will be written out.  Default: None.
+        r"""Create list of projections to be added together to make the light cone.
+
+        Parameters
+        ----------
+        seed : int
+            The seed for the random number generator.  Any light cone solution
+            can be reproduced by giving the same random seed.  Default: None
+            (each solution will be distinct).
+        filename : string
+            If given, a text file detailing the solution will be written out.
+            Default: None.
         """
 
         # Don't use box coherence with maximum projection depths.
@@ -209,13 +217,17 @@
             self._save_light_cone_solution(filename=filename)
 
     def get_halo_mask(self, mask_file=None, map_file=None, **kwargs):
+        r"""Gets a halo mask from a file or makes a new one.
+
+        Parameters
+        ----------
+        mask_file : string, optional
+            An HDF5 file to which to output the halo mask
+        map_file : string, optional
+            A text file to which to output the halo map (locations in the
+            images of the halos).
+
         """
-        Gets a halo mask from a file or makes a new one.
-        :param mask_file (str): specify an hdf5 file to output the halo mask.
-        :param map_file (str): specify a text file to output the halo map 
-               (locations in image of halos).
-        """
-
         # Get halo map if map_file given.
         if map_file is not None and not os.path.exists(map_file):
             light_cone_halo_map(self, map_file=map_file, **kwargs)
@@ -240,22 +252,34 @@
     def project_light_cone(self, field, weight_field=None, apply_halo_mask=False, node=None,
                            save_stack=True, save_slice_images=False, cmap_name='algae', 
                            flatten_stack=False, photon_field=False):
-        """
-        Create projections for light cone, then add them together.
-        :param weight_field (str): the weight field of the projection.  This has the same meaning as in standard 
-               projections.  Default: None.
-        :param apply_halo_mask (bool): if True, a boolean mask is apply to the light cone projection.  See below for a 
-               description of halo masks.  Default: False.
-        :param node (str): a prefix to be prepended to the node name under which the projection data is serialized.  
-               Default: None.
-        :param save_stack (bool): if True, the unflatted light cone data including each individual slice is written to 
-               an hdf5 file.  Default: True.
-        :param save_slice_images (bool): save images for each individual projection slice.  Default: False.
-        :param cmap_name (str): color map for images.  Default: 'algae'.
-        :param flatten_stack (bool): if True, the light cone stack is continually flattened each time a slice is added 
-               in order to save memory.  This is generally not necessary.  Default: False.
-        :param photon_field (bool): if True, the projection data for each slice is decremented by 4 Pi R^2`, where R 
-               is the luminosity distance between the observer and the slice redshift.  Default: False.
+        r"""Create projections for light cone, then add them together.
+
+        Parameters
+        ----------
+        field : string
+            The field to project.
+        weight_field : string
+            the weight field of the projection.  This has the same meaning as
+            in standard projections.  Default: None.
+        apply_halo_mask : bool
+            if True, a boolean mask is applied to the light cone projection.  See
+            below for a description of halo masks.  Default: False.
+        node : string
+            a prefix to be prepended to the node name under which the
+            projection data is serialized.  Default: None.
+        save_stack : bool
+            if True, the unflattened light cone data including each individual
+            slice is written to an hdf5 file.  Default: True.
+        save_slice_images : bool
+            save images for each individual projection slice.  Default: False.
+        cmap_name : string
+            color map for images.  Default: 'algae'.
+        flatten_stack : bool
+            if True, the light cone stack is continually flattened each time a
+            slice is added in order to save memory.  This is generally not
+            necessary.  Default: False.
+        photon_field : bool
+            if True, the projection data for each slice is decremented by 4 Pi
+            R^2, where R is the luminosity distance between the observer and
+            the slice redshift.  Default: False.
         """
 
         # Clear projection stack.


--- a/yt/data_objects/data_containers.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/data_objects/data_containers.py	Fri Sep 02 10:20:42 2011 -0400
@@ -40,7 +40,7 @@
 from yt.data_objects.particle_io import particle_handler_registry
 from yt.utilities.amr_utils import find_grids_in_inclined_box, \
     grid_points_in_volume, planar_points_in_volume, VoxelTraversal, \
-    QuadTree
+    QuadTree, get_box_grids_below_level
 from yt.utilities.data_point_utilities import CombineGrids, \
     DataCubeRefine, DataCubeReplace, FillRegion, FillBuffer
 from yt.utilities.definitions import axis_names, x_dict, y_dict
@@ -2966,251 +2966,6 @@
         """
         return 4./3. * math.pi * (self.radius * self.pf[unit])**3.0
 
-class AMRFloatCoveringGridBase(AMR3DData):
-    """
-    Covering grids represent fixed-resolution data over a given region.
-    In order to achieve this goal -- for instance in order to obtain ghost
-    zones -- grids up to and including the indicated level are included.
-    No interpolation is done (as that would affect the 'power' on small
-    scales) on the input data.
-    """
-    _spatial = True
-    _type_name = "float_covering_grid"
-    _con_args = ('level', 'left_edge', 'right_edge', 'ActiveDimensions')
-    def __init__(self, level, left_edge, right_edge, dims, fields = None,
-                 pf = None, num_ghost_zones = 0, use_pbar = True, **kwargs):
-        """
-        The data object returned will consider grids up to *level* in
-        generating fixed resolution data between *left_edge* and *right_edge*
-        that is *dims* (3-values) on a side.
-        """
-        AMR3DData.__init__(self, center=None, fields=fields, pf=pf, **kwargs)
-        self.left_edge = na.array(left_edge)
-        self.right_edge = na.array(right_edge)
-        self.level = level
-        self.ActiveDimensions = na.array(dims)
-        dds = (self.right_edge-self.left_edge) \
-              / self.ActiveDimensions
-        self.dds = dds
-        self.data["dx"] = dds[0]
-        self.data["dy"] = dds[1]
-        self.data["dz"] = dds[2]
-        self._num_ghost_zones = num_ghost_zones
-        self._use_pbar = use_pbar
-        self._refresh_data()
-
-    def _get_list_of_grids(self):
-        if self._grids is not None: return
-        if na.any(self.left_edge < self.pf.domain_left_edge) or \
-           na.any(self.right_edge > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
-                            self.left_edge, self.right_edge)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge, self.right_edge)
-        level_ind = na.where(self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
-
-    def extract_region(self, indices):
-        mylog.error("Sorry, dude, do it yourself, it's already in 3-D.")
-
-    def _refresh_data(self):
-        AMR3DData._refresh_data(self)
-        self['dx'] = self.dds[0] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dy'] = self.dds[1] * na.ones(self.ActiveDimensions, dtype='float64')
-        self['dz'] = self.dds[2] * na.ones(self.ActiveDimensions, dtype='float64')
-
-    def get_data(self, fields=None):
-        if self._grids is None:
-            self._get_list_of_grids()
-        if fields is None:
-            fields = self.fields[:]
-        else:
-            fields = ensure_list(fields)
-        obtain_fields = []
-        for field in fields:
-            if self.data.has_key(field): continue
-            if field not in self.hierarchy.field_list:
-                try:
-                    #print "Generating", field
-                    self._generate_field(field)
-                    continue
-                except NeedsOriginalGrid, ngt_exception:
-                    pass
-            obtain_fields.append(field)
-            self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
-        if len(obtain_fields) == 0: return
-        mylog.debug("Getting fields %s from %s possible grids",
-                   obtain_fields, len(self._grids))
-        if self._use_pbar: pbar = \
-                get_pbar('Searching grids for values ', len(self._grids))
-        for i, grid in enumerate(self._grids):
-            if self._use_pbar: pbar.update(i)
-            self._get_data_from_grid(grid, obtain_fields)
-            if not na.any(self[obtain_fields[0]] == -999): break
-        if self._use_pbar: pbar.finish()
-        if na.any(self[obtain_fields[0]] == -999):
-            # and self.dx < self.hierarchy.grids[0].dx:
-            print "COVERING PROBLEM", na.where(self[obtain_fields[0]]==-999)[0].size
-            print na.where(self[obtain_fields[0]]==-999)
-            raise KeyError
-            
-    def _generate_field(self, field):
-        if self.pf.field_info.has_key(field):
-            # First we check the validator; this might even raise!
-            self.pf.field_info[field].check_available(self)
-            self[field] = self.pf.field_info[field](self)
-        else: # Can't find the field, try as it might
-            raise KeyError(field)
-
-    def flush_data(self, field=None):
-        """
-        Any modifications made to the data in this object are pushed back
-        to the originating grids, except the cells where those grids are both
-        below the current level `and` have child cells.
-        """
-        self._get_list_of_grids()
-        # We don't generate coordinates here.
-        if field == None:
-            fields_to_get = self.fields[:]
-        else:
-            fields_to_get = ensure_list(field)
-        for grid in self._grids:
-            self._flush_data_to_grid(grid, fields_to_get)
-
-    @restore_grid_state
-    def _get_data_from_grid(self, grid, fields):
-        ll = int(grid.Level == self.level)
-        g_dx = grid.dds.ravel()
-        c_dx = self.dds.ravel()
-        g_fields = [grid[field] for field in ensure_list(fields)]
-        c_fields = [self[field] for field in ensure_list(fields)]
-        DataCubeRefine(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge, self.right_edge, c_dx, c_fields,
-            ll, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    def _flush_data_to_grid(self, grid, fields):
-        ll = int(grid.Level == self.level)
-        g_dx = grid.dds.ravel()
-        c_dx = self.dds.ravel()
-        g_fields = []
-        for field in ensure_list(fields):
-            if not grid.has_key(field): grid[field] = \
-               na.zeros(grid.ActiveDimensions, dtype=self[field].dtype)
-            g_fields.append(grid[field])
-        c_fields = [self[field] for field in ensure_list(fields)]
-        DataCubeReplace(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge, self.right_edge, c_dx, c_fields,
-            ll, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    @property
-    def LeftEdge(self):
-        return self.left_edge
-
-    @property
-    def RightEdge(self):
-        return self.right_edge
-
-class AMRSmoothedCoveringGridBase(AMRFloatCoveringGridBase):
-    _type_name = "smoothed_covering_grid"
-    def __init__(self, *args, **kwargs):
-        dlog2 = na.log10(kwargs['dims'])/na.log10(2)
-        if not na.all(na.floor(dlog2) == na.ceil(dlog2)):
-            pass # used to warn but I think it is not accurate anymore
-            #mylog.warning("Must be power of two dimensions")
-            #raise ValueError
-        #kwargs['num_ghost_zones'] = 0
-        AMRFloatCoveringGridBase.__init__(self, *args, **kwargs)
-
-    def _get_list_of_grids(self):
-        if na.any(self.left_edge - self.dds < self.pf.domain_left_edge) or \
-           na.any(self.right_edge + self.dds > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
-                            self.left_edge - self.dds,
-                            self.right_edge + self.dds)
-        else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge - self.dds,
-                            self.right_edge + self.dds)
-        level_ind = na.where(self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)]
-
-    def _get_level_array(self, level, fields):
-        fields = ensure_list(fields)
-        # We assume refinement by a factor of two
-        rf = self.pf.refine_by**(self.level - level)
-        dims = na.maximum(1,self.ActiveDimensions/rf) + 2
-        dx = (self.right_edge-self.left_edge)/(dims-2)
-        x,y,z = (na.mgrid[0:dims[0],0:dims[1],0:dims[2]].astype('float64')-0.5)\
-              * dx[0]
-        x += self.left_edge[0] - dx[0]
-        y += self.left_edge[1] - dx[1]
-        z += self.left_edge[2] - dx[2]
-        offsets = [self['cd%s' % ax]*0.5 for ax in 'xyz']
-        bounds = [self.left_edge[0]-offsets[0], self.right_edge[0]+offsets[0],
-                  self.left_edge[1]-offsets[1], self.right_edge[1]+offsets[1],
-                  self.left_edge[2]-offsets[2], self.right_edge[2]+offsets[2]]
-        fake_grid = {'x':x,'y':y,'z':z,'dx':dx[0],'dy':dx[1],'dz':dx[2]}
-        for ax in 'xyz': self['cd%s'%ax] = fake_grid['d%s'%ax]
-        for field in fields:
-            # Generate the new grid field
-            interpolator = TrilinearFieldInterpolator(
-                            self[field], bounds, ['x','y','z'],
-                            truncate = True)
-            self[field] = interpolator(fake_grid)
-        return fake_grid
-
-    def get_data(self, field=None):
-        self._get_list_of_grids()
-        # We don't generate coordinates here.
-        if field == None:
-            fields_to_get = self.fields[:]
-        else:
-            fields_to_get = ensure_list(field)
-        for field in fields_to_get:
-            grid_count = 0
-            if self.data.has_key(field):
-                continue
-            mylog.debug("Getting field %s from %s possible grids",
-                       field, len(self._grids))
-            if self._use_pbar: pbar = \
-                    get_pbar('Searching grids for values ', len(self._grids))
-            # How do we find out the root grid base dx?
-            idims = na.array([3,3,3])
-            dx = na.minimum((self.right_edge-self.left_edge)/(idims-2),
-                            self.pf.h.grids[0].dds[0])
-            idims = na.floor((self.right_edge-self.left_edge)/dx) + 2
-            for ax in 'xyz': self['cd%s'%ax] = dx[0]
-            self[field] = na.zeros(idims,dtype='float64')-999
-            for level in range(self.level+1):
-                for grid in self.select_grids(level):
-                    if self._use_pbar: pbar.update(grid_count)
-                    self._get_data_from_grid(grid, field)
-                    grid_count += 1
-                if level < self.level: self._get_level_array(level+1, field)
-            self[field] = self[field][1:-1,1:-1,1:-1]
-            if self._use_pbar: pbar.finish()
-        
-    @restore_grid_state
-    def _get_data_from_grid(self, grid, fields):
-        fields = ensure_list(fields)
-        g_dx = grid.dds
-        c_dx = na.array([self['cdx'],self['cdy'],self['cdz']])
-        g_fields = [grid[field] for field in fields]
-        c_fields = [self[field] for field in fields]
-        total = DataCubeRefine(
-            grid.LeftEdge, g_dx, g_fields, grid.child_mask,
-            self.left_edge-c_dx, self.right_edge+c_dx,
-            c_dx, c_fields,
-            1, self.pf.domain_left_edge, self.pf.domain_right_edge)
-
-    def flush_data(self, *args, **kwargs):
-        raise KeyError("Can't do this")
-
 class AMRCoveringGridBase(AMR3DData):
     _spatial = True
     _type_name = "covering_grid"
@@ -3234,16 +2989,15 @@
         if self._grids is not None: return
         if na.any(self.left_edge - buffer < self.pf.domain_left_edge) or \
            na.any(self.right_edge + buffer > self.pf.domain_right_edge):
-            grids,ind = self.pf.hierarchy.get_periodic_box_grids(
+            grids,ind = self.pf.hierarchy.get_periodic_box_grids_below_level(
                             self.left_edge - buffer,
-                            self.right_edge + buffer)
+                            self.right_edge + buffer, self.level)
         else:
-            grids,ind = self.pf.hierarchy.get_box_grids(
-                            self.left_edge - buffer,
-                            self.right_edge + buffer)
-        level_ind = (self.pf.hierarchy.grid_levels.ravel()[ind] <= self.level)
-        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind][level_ind])
-        self._grids = self.pf.hierarchy.grids[ind][level_ind][(sort_ind,)][::-1]
+            grids,ind = self.pf.hierarchy.get_box_grids_below_level(
+                self.left_edge - buffer,
+                self.right_edge + buffer, self.level)
+        sort_ind = na.argsort(self.pf.h.grid_levels.ravel()[ind])
+        self._grids = self.pf.hierarchy.grids[ind][(sort_ind,)][::-1]
 
     def _refresh_data(self):
         AMR3DData._refresh_data(self)
@@ -3346,20 +3100,22 @@
     def RightEdge(self):
         return self.right_edge
 
-class AMRIntSmoothedCoveringGridBase(AMRCoveringGridBase):
-    _type_name = "si_covering_grid"
+class AMRSmoothedCoveringGridBase(AMRCoveringGridBase):
+    _type_name = "smoothed_covering_grid"
     @wraps(AMRCoveringGridBase.__init__)
     def __init__(self, *args, **kwargs):
         AMRCoveringGridBase.__init__(self, *args, **kwargs)
         self._final_start_index = self.global_startindex
 
     def _get_list_of_grids(self):
-        buffer = self.pf.h.select_grids(0)[0].dds
+        if self._grids is not None: return
+        buffer = ((self.pf.domain_right_edge - self.pf.domain_left_edge)
+                 / self.pf.domain_dimensions).max()
         AMRCoveringGridBase._get_list_of_grids(self, buffer)
+        # We reverse the order to ensure that coarse grids are first
         self._grids = self._grids[::-1]
 
     def get_data(self, field=None):
-        dx = [self.pf.h.select_grids(l)[0].dds for l in range(self.level+1)]
         self._get_list_of_grids()
         # We don't generate coordinates here.
         if field == None:
@@ -3379,14 +3135,16 @@
             # L/R edges.
             # We jump-start our task here
             self._update_level_state(0, field)
-            for level in range(self.level+1):
-                for grid in self.select_grids(level):
-                    if self._use_pbar: pbar.update(grid_count)
-                    self._get_data_from_grid(grid, field, level)
-                    grid_count += 1
-                if level < self.level:
-                    self._update_level_state(level + 1)
+            
+            # The grids are assumed to be pre-sorted
+            last_level = 0
+            for gi, grid in enumerate(self._grids):
+                if self._use_pbar: pbar.update(gi)
+                if grid.Level > last_level and grid.Level <= self.level:
+                    self._update_level_state(last_level + 1)
                     self._refine(1, field)
+                    last_level = grid.Level
+                self._get_data_from_grid(grid, field, grid.Level)
             if self.level > 0:
                 self[field] = self[field][1:-1,1:-1,1:-1]
             if na.any(self[field] == -999):
@@ -3408,11 +3166,13 @@
             # We use one grid cell at LEAST, plus one buffer on all sides
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64') + 2
             self[field] = na.zeros(idims,dtype='float64')-999
+            self._cur_dims = idims.astype("int32")
         elif level == 0 and self.level == 0:
             DLE = self.pf.domain_left_edge
             self.global_startindex = na.array(na.floor(LL/ dx), dtype='int64')
             idims = na.rint((self.right_edge-self.left_edge)/dx).astype('int64')
             self[field] = na.zeros(idims,dtype='float64')-999
+            self._cur_dims = idims.astype("int32")
 
     def _refine(self, dlevel, field):
         rf = float(self.pf.refine_by**dlevel)
@@ -3438,17 +3198,17 @@
         interpolator = TrilinearFieldInterpolator(
                         self[field], old_bounds, ['x','y','z'],
                         truncate = True)
+        self._cur_dims = new_dims.astype("int32")
         self[field] = interpolator(fake_grid)
 
     def _get_data_from_grid(self, grid, fields, level):
         fields = ensure_list(fields)
         g_fields = [grid[field] for field in fields]
         c_fields = [self[field] for field in fields]
-        dims = na.array(self[field].shape, dtype='int32')
         count = FillRegion(1,
             grid.get_global_startindex(), self.global_startindex,
             c_fields, g_fields, 
-            dims, grid.ActiveDimensions,
+            self._cur_dims, grid.ActiveDimensions,
             grid.child_mask, self.domain_width, 1, 0)
         return count
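Taken together, this hunk folds the smoothed covering grid onto the integer covering grid machinery, walks the pre-sorted grid list coarse-to-fine, and tracks the working array shape in `_cur_dims` instead of re-deriving it per grid. A sketch of constructing one through the hierarchy, assuming the yt 2.x interface and a loaded dataset `pf` (the dataset path, level, dimensions, and field are illustrative):

    from yt.mods import load

    pf = load("my_dataset")  # hypothetical dataset
    # Fixed-resolution cube at level 2; coarse grids fill in first, then each
    # level is refined by trilinear interpolation before finer grids land.
    scg = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0],
                                      dims=[64, 64, 64])
    rho = scg["Density"]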
 


--- a/yt/data_objects/derived_quantities.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Fri Sep 02 10:20:42 2011 -0400
@@ -173,9 +173,13 @@
     This function returns the location of the center
     of mass. By default, it computes that of the *non-particle* data in the object.
 
-    :param use_cells: if True, will include the cell mass (default: True)
-    :param use_particles: if True, will include the particles in the 
-    object (default: False)
+    Parameters
+    ----------
+
+    use_cells : bool
+        If True, will include the cell mass (default: True)
+    use_particles : bool
+        If True, will include the particles in the object (default: False)
     """
     x = y = z = den = 0
     if use_cells: 
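A sketch of invoking this quantity with the newly documented keywords, assuming a yt 2.x data object such as a sphere (the center and radius are illustrative):

    sp = pf.h.sphere([0.5, 0.5, 0.5], 0.1)
    # Gas cells only -- the documented defaults:
    com_gas = sp.quantities["CenterOfMass"](use_cells=True, use_particles=False)
    # Cells and particles together:
    com_all = sp.quantities["CenterOfMass"](use_cells=True, use_particles=True)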


--- a/yt/data_objects/field_info_container.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/data_objects/field_info_container.py	Fri Sep 02 10:20:42 2011 -0400
@@ -26,19 +26,20 @@
 """
 
 import types
-import numpy as na
 import inspect
 import copy
 import itertools
 
+import numpy as na
+
 from yt.funcs import *
 
 class FieldInfoContainer(object): # We are all Borg.
     """
     This is a generic field container.  It contains a list of potential derived
-    fields, all of which know how to act on a data object and return a value.  This
-    object handles converting units as well as validating the availability of a
-    given field.
+    fields, all of which know how to act on a data object and return a value.
+    This object handles converting units as well as validating the availability
+    of a given field.
     """
     _shared_state = {}
     _universal_field_list = {}
@@ -51,22 +52,25 @@
             return self._universal_field_list[key]
         raise KeyError
     def keys(self):
-        """
-        Return all the field names this object knows about.
-        """
+        """ Return all the field names this object knows about. """
         return self._universal_field_list.keys()
+
     def __iter__(self):
         return self._universal_field_list.iterkeys()
+
     def __setitem__(self, key, val):
         self._universal_field_list[key] = val
+
     def has_key(self, key):
         return key in self._universal_field_list
+
     def add_field(self, name, function = None, **kwargs):
         """
         Add a new field, along with supplemental metadata, to the list of
         available fields.  This respects a number of arguments, all of which
         are passed on to the constructor for
         :class:`~yt.data_objects.api.DerivedField`.
+
         """
         if function == None:
             def create_function(function):
@@ -74,6 +78,7 @@
                 return function
             return create_function
         self[name] = DerivedField(name, function, **kwargs)
+
 FieldInfo = FieldInfoContainer()
 add_field = FieldInfo.add_field
 
@@ -89,14 +94,18 @@
 class CodeFieldInfoContainer(FieldInfoContainer):
     def __setitem__(self, key, val):
         self._field_list[key] = val
+
     def __iter__(self):
         return itertools.chain(self._field_list.iterkeys(),
-                        self._universal_field_list.iterkeys())
+                               self._universal_field_list.iterkeys())
+
     def keys(self):
         return set(self._field_list.keys() + self._universal_field_list.keys())
+
     def has_key(self, key):
         return key in self._universal_field_list \
             or key in self._field_list
+
     def __getitem__(self, key):
         if key in self._field_list:
             return self._field_list[key]
@@ -111,6 +120,7 @@
     def __init__(self, ghost_zones = 0, fields=None):
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __str__(self):
         return "(%s, %s)" % (self.ghost_zones, self.fields)
 
@@ -121,18 +131,21 @@
 class NeedsDataField(ValidationException):
     def __init__(self, missing_fields):
         self.missing_fields = missing_fields
+
     def __str__(self):
         return "(%s)" % (self.missing_fields)
 
 class NeedsProperty(ValidationException):
     def __init__(self, missing_properties):
         self.missing_properties = missing_properties
+
     def __str__(self):
         return "(%s)" % (self.missing_properties)
 
 class NeedsParameter(ValidationException):
     def __init__(self, missing_parameters):
         self.missing_parameters = missing_parameters
+
     def __str__(self):
         return "(%s)" % (self.missing_parameters)
 
@@ -141,18 +154,19 @@
     NumberOfParticles = 1
     _read_exception = None
     _id_offset = 0
+
     def __init__(self, nd = 16, pf = None, flat = False):
         self.nd = nd
         self.flat = flat
         self._spatial = not flat
-        self.ActiveDimensions = [nd,nd,nd]
-        self.LeftEdge = [0.0,0.0,0.0]
-        self.RightEdge = [1.0,1.0,1.0]
+        self.ActiveDimensions = [nd, nd, nd]
+        self.LeftEdge = [0.0, 0.0, 0.0]
+        self.RightEdge = [1.0, 1.0, 1.0]
         self.dds = na.ones(3, "float64")
         self['dx'] = self['dy'] = self['dz'] = na.array([1.0])
         class fake_parameter_file(defaultdict):
             pass
-        if pf is None:
+        if pf is None:  # setup defaults
             pf = fake_parameter_file(lambda: 1)
             pf.current_redshift = pf.omega_lambda = pf.omega_matter = \
                 pf.hubble_constant = pf.cosmological_simulation = 0.0
@@ -168,17 +182,18 @@
             io = fake_io()
             def get_smallest_dx(self):
                 return 1.0
+
         self.hierarchy = fake_hierarchy()
         self.requested = []
         self.requested_parameters = []
         if not self.flat:
             defaultdict.__init__(self,
-                lambda: na.ones((nd,nd,nd), dtype='float64')
-                + 1e-4*na.random.random((nd,nd,nd)))
+                lambda: na.ones((nd, nd, nd), dtype='float64')
+                + 1e-4*na.random.random((nd, nd, nd)))
         else:
             defaultdict.__init__(self, 
-                lambda: na.ones((nd*nd*nd), dtype='float64')
-                + 1e-4*na.random.random((nd*nd*nd)))
+                lambda: na.ones((nd * nd * nd), dtype='float64')
+                + 1e-4*na.random.random((nd * nd * nd)))
     def __missing__(self, item):
         FI = getattr(self.pf, "field_info", FieldInfo)
         if FI.has_key(item) and \
@@ -190,15 +205,20 @@
                 nfd = FieldDetector(self.nd+ngz*2)
                 nfd._num_ghost_zones = ngz
                 vv = FI[item](nfd)
-                if ngz > 0: vv = vv[ngz:-ngz,ngz:-ngz,ngz:-ngz]
+                if ngz > 0: vv = vv[ngz:-ngz, ngz:-ngz, ngz:-ngz]
+
                 for i in nfd.requested:
                     if i not in self.requested: self.requested.append(i)
+
                 for i in nfd.requested_parameters:
-                    if i not in self.requested_parameters: self.requested_parameters.append(i)
+                    if i not in self.requested_parameters:
+                        self.requested_parameters.append(i)
+
             if vv is not None:
                 if not self.flat: self[item] = vv
                 else: self[item] = vv.ravel()
                 return self[item]
+
         self.requested.append(item)
         return defaultdict.__missing__(self, item)
 
@@ -249,23 +269,29 @@
         :param display_name: a name used in the plots
         :param projection_conversion: which unit should we multiply by in a
                                       projection?
+
         """
         self.name = name
         self._function = function
+
         if validators:
             self.validators = ensure_list(validators)
         else:
             self.validators = []
+
         self.take_log = take_log
         self._units = units
         self._projected_units = projected_units
+
         if not convert_function:
             convert_function = lambda a: 1.0
         self._convert_function = convert_function
         self._particle_convert_function = particle_convert_function
+
         self.particle_type = particle_type
         self.vector_field = vector_field
         self.projection_conversion = projection_conversion
+
         self.display_field = display_field
         self.display_name = display_name
         self.not_in_all = not_in_all
@@ -274,6 +300,7 @@
         """
         This raises an exception of the appropriate type if the set of
         validation mechanisms are not met, and otherwise returns True.
+
         """
         for validator in self.validators:
             validator(data)
@@ -283,6 +310,7 @@
     def get_dependencies(self, *args, **kwargs):
         """
         This returns a list of names of fields that this field depends on.
+
         """
         e = FieldDetector(*args, **kwargs)
         if self._function.func_name == '<lambda>':
@@ -292,47 +320,50 @@
         return e
 
     def get_units(self):
-        """
-        Return a string describing the units.
-        """
+        """ Return a string describing the units.  """
         return self._units
 
     def get_projected_units(self):
         """
         Return a string describing the units if the field has been projected.
+
         """
         return self._projected_units
 
     def __call__(self, data):
-        """
-        Return the value of the field in a given *data* object.
-        """
+        """ Return the value of the field in a given *data* object.  """
         ii = self.check_available(data)
         original_fields = data.keys() # Copy
         dd = self._function(self, data)
         dd *= self._convert_function(data)
+
         for field_name in data.keys():
             if field_name not in original_fields:
                 del data[field_name]
+
         return dd
 
     def get_source(self):
         """
         Return a string containing the source of the function (if possible.)
+
         """
         return inspect.getsource(self._function)
 
     def get_label(self, projected=False):
         """
         Return a data label for the given field, including units.
+
         """
         name = self.name
         if self.display_name is not None: name = self.display_name
         data_label = r"$\rm{%s}" % name
+
         if projected: units = self.get_projected_units()
         else: units = self.get_units()
         if units != "": data_label += r"\/\/ (%s)" % (units)
         data_label += r"$"
+
         return data_label
 
     def particle_convert(self, data):
@@ -347,9 +378,11 @@
     def __init__(self, parameters):
         """
         This validator ensures that the parameter file has a given parameter.
+
         """
         FieldValidator.__init__(self)
         self.parameters = ensure_list(parameters)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.parameters:
@@ -362,11 +395,13 @@
 class ValidateDataField(FieldValidator):
     def __init__(self, field):
         """
-        This validator ensures that the output file has a given data field stored
-        in it.
+        This validator ensures that the output file has a given data field
+        stored in it.
+
         """
         FieldValidator.__init__(self)
         self.fields = ensure_list(field)
+
     def __call__(self, data):
         doesnt_have = []
         if isinstance(data, FieldDetector): return True
@@ -375,15 +410,19 @@
                 doesnt_have.append(f)
         if len(doesnt_have) > 0:
             raise NeedsDataField(doesnt_have)
+
         return True
 
 class ValidateProperty(FieldValidator):
     def __init__(self, prop):
         """
-        This validator ensures that the data object has a given python attribute.
+        This validator ensures that the data object has a given python
+        attribute.
+
         """
         FieldValidator.__init__(self)
         self.prop = ensure_list(prop)
+
     def __call__(self, data):
         doesnt_have = []
         for p in self.prop:
@@ -391,6 +430,7 @@
                 doesnt_have.append(p)
         if len(doesnt_have) > 0:
             raise NeedsProperty(doesnt_have)
+
         return True
 
 class ValidateSpatial(FieldValidator):
@@ -398,13 +438,15 @@
         """
         This validator ensures that the data handed to the field is of spatial
         nature -- that is to say, 3-D.
+
         """
         FieldValidator.__init__(self)
         self.ghost_zones = ghost_zones
         self.fields = fields
+
     def __call__(self, data):
-        # When we say spatial information, we really mean
-        # that it has a three-dimensional data structure
+        # When we say spatial information, we really mean that it has a
+        # three-dimensional data structure
         #if isinstance(data, FieldDetector): return True
         if not data._spatial:
             raise NeedsGridType(self.ghost_zones,self.fields)
@@ -417,8 +459,10 @@
         """
         This validator ensures that the data handed to the field is an actual
         grid patch, not a covering grid of any kind.
+
         """
         FieldValidator.__init__(self)
+
     def __call__(self, data):
         # We need to make sure that it's an actual AMR grid
         if isinstance(data, FieldDetector): return True
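Since add_field is reworked above, a minimal sketch of registering a derived field through it, assuming the yt 2.x convention of importing from yt.mods (the field definition itself is illustrative):

    from yt.mods import add_field

    def _DensitySquared(field, data):
        # A derived field receives the DerivedField object and a data container.
        return data["Density"]**2

    add_field("DensitySquared", function=_DensitySquared,
              units=r"\rm{g}^{2}/\rm{cm}^{6}")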


--- a/yt/data_objects/grid_patch.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/data_objects/grid_patch.py	Fri Sep 02 10:20:42 2011 -0400
@@ -47,6 +47,7 @@
     _type_name = 'grid'
     _skip_add = True
     _con_args = ('id', 'filename')
+    OverlappingSiblings = None
 
     __slots__ = ['data', 'field_parameters', 'id', 'hierarchy', 'pf',
                  'ActiveDimensions', 'LeftEdge', 'RightEdge', 'Level',
@@ -366,7 +367,7 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by
+        rf = self.pf.refine_by**(child.Level - self.Level)
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi/rf - gi)
         endIndex = na.minimum( (cgi+child.ActiveDimensions)/rf - gi,
@@ -384,6 +385,10 @@
         self._child_mask = na.ones(self.ActiveDimensions, 'int32')
         for child in self.Children:
             self.__fill_child_mask(child, self._child_mask, 0)
+        if self.OverlappingSiblings is not None:
+            for sibling in self.OverlappingSiblings:
+                self.__fill_child_mask(sibling, self._child_mask, 0)
+        
         self._child_indices = (self._child_mask==0) # bool, possibly redundant
 
     def __generate_child_index_mask(self):
@@ -395,6 +400,10 @@
         for child in self.Children:
             self.__fill_child_mask(child, self._child_index_mask,
                                    child.id)
+        if self.OverlappingSiblings is not None:
+            for sibling in self.OverlappingSiblings:
+                self.__fill_child_mask(sibling, self._child_index_mask,
+                                       sibling.id)
 
     def _get_coords(self):
         if self.__coords == None: self._generate_coords()
@@ -441,7 +450,7 @@
         if smoothed:
             #cube = self.hierarchy.smoothed_covering_grid(
             #    level, new_left_edge, new_right_edge, **kwargs)
-            cube = self.hierarchy.si_covering_grid(
+            cube = self.hierarchy.smoothed_covering_grid(
                 level, new_left_edge, **kwargs)
         else:
             cube = self.hierarchy.covering_grid(
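The __fill_child_mask change matters once the overlapping grid sits more than one level below its "parent": the refinement factor has to be compounded rather than applied once. A small worked sketch of the index arithmetic, assuming refine_by = 2 (all values illustrative):

    refine_by = 2
    parent_level, child_level = 1, 3
    rf = refine_by ** (child_level - parent_level)  # 4, where the old code used 2
    cgi, gi = 40, 8            # child and parent global start indices (one axis)
    start = max(0, cgi // rf - gi)  # parent-cell offset where masking begins: 2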


--- a/yt/data_objects/object_finding_mixin.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/data_objects/object_finding_mixin.py	Fri Sep 02 10:20:42 2011 -0400
@@ -26,6 +26,9 @@
 import numpy as na
 
 from yt.funcs import *
+from yt.utilities.amr_utils import \
+    get_box_grids_level, \
+    get_box_grids_below_level
 
 class ObjectFindingMixin(object):
 
@@ -115,7 +118,7 @@
         as the input *fields*.
         
         Parameters
-        ---------
+        ----------
         fields : string or list of strings
             The field(s) that will be returned.
         
@@ -201,3 +204,33 @@
                     mask[gi] = True
         return self.grids[mask], na.where(mask)
 
+    def get_box_grids_below_level(self, left_edge, right_edge, level):
+        # We discard grids if they are ABOVE the level
+        mask = na.empty(self.grids.size, dtype='int32')
+        get_box_grids_below_level(left_edge, right_edge,
+                            level,
+                            self.grid_left_edge, self.grid_right_edge,
+                            self.grid_levels, mask)
+        mask = mask.astype("bool")
+        return self.grids[mask], na.where(mask)
+
+    def get_periodic_box_grids_below_level(self, left_edge, right_edge, level):
+        left_edge = na.array(left_edge)
+        right_edge = na.array(right_edge)
+        mask = na.zeros(self.grids.shape, dtype='bool')
+        dl = self.parameter_file.domain_left_edge
+        dr = self.parameter_file.domain_right_edge
+        db = right_edge - left_edge
+        for off_x in [-1, 0, 1]:
+            nle = left_edge.copy()
+            nre = left_edge.copy()
+            nle[0] = dl[0] + (dr[0]-dl[0])*off_x + left_edge[0]
+            for off_y in [-1, 0, 1]:
+                nle[1] = dl[1] + (dr[1]-dl[1])*off_y + left_edge[1]
+                for off_z in [-1, 0, 1]:
+                    nle[2] = dl[2] + (dr[2]-dl[2])*off_z + left_edge[2]
+                    nre = nle + db
+                    g, gi = self.get_box_grids_below_level(nle, nre, level)
+                    mask[gi] = True
+        return self.grids[mask], na.where(mask)
+
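For reference, the selection these two helpers perform reduces to a per-axis overlap test plus a level ceiling; a compact NumPy sketch of the non-periodic case (the array names are illustrative, not the hierarchy's attributes):

    import numpy as np

    def box_grids_below_level(left, right, level, gle, gre, glevels):
        # Keep a grid when the boxes overlap on every axis and its level
        # does not exceed the ceiling -- mirroring the Cython kernel.
        overlap = np.all((gle < right) & (gre > left), axis=1)
        return overlap & (glevels.ravel() <= level)

The periodic variant simply shifts the query box by every combination of domain-width offsets in x, y, and z and ORs the resulting masks together, as the loop above does.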


--- a/yt/frontends/nyx/data_structures.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/frontends/nyx/data_structures.py	Fri Sep 02 10:20:42 2011 -0400
@@ -548,11 +548,17 @@
         """
         self.storage_filename = storage_filename
         self.parameter_filename = param_filename
-        self.parameter_file_path = os.path.abspath(self.parameter_filename)
         self.fparameter_filename = fparam_filename
-        self.fparameter_file_path = os.path.abspath(self.fparameter_filename)
+
         self.path = os.path.abspath(plotname)  # data folder
 
+        # silly inputs and probin file thing (this is on the Nyx todo list)
+        self.parameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                self.parameter_filename)
+
+        self.fparameter_file_path = os.path.join(os.path.dirname(self.path),
+                                                 self.fparameter_filename)
+
         self.fparameters = {}
 
         # @todo: quick fix...
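The practical effect is that both parameter files are now located relative to the plot directory's parent rather than the current working directory; for example (paths illustrative):

    import os

    path = os.path.abspath("/data/run/plt00010")     # the data folder
    os.path.join(os.path.dirname(path), "inputs")    # -> '/data/run/inputs'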


--- a/yt/frontends/orion/data_structures.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/frontends/orion/data_structures.py	Fri Sep 02 10:20:42 2011 -0400
@@ -444,16 +444,12 @@
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
                  data_style='orion_native', paranoia=False,
                  storage_filename = None):
-        """need to override for Orion file structure.
-
-        the paramfile is usually called "inputs"
+        """
+        The paramfile is usually called "inputs"
         and there may be a fortran inputs file usually called "probin"
         plotname here will be a directory name
-        as per BoxLib, data_style will be one of
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
+        as per BoxLib, data_style will be Native (implemented here), IEEE (not
+        yet implemented), or ASCII (not yet implemented).
         """
         self.storage_filename = storage_filename
         self.paranoid_read = paranoia


--- a/yt/frontends/ramses/_ramses_reader.pyx	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/frontends/ramses/_ramses_reader.pyx	Fri Sep 02 10:20:42 2011 -0400
@@ -401,6 +401,7 @@
         self.hydro_datas = <RAMSES_hydro_data ***>\
                        malloc(sizeof(RAMSES_hydro_data**) * self.rsnap.m_header.ncpu)
         self.ndomains = self.rsnap.m_header.ncpu
+        
         # Note we don't do ncpu + 1
         for idomain in range(self.rsnap.m_header.ncpu):
             # we don't delete local_tree
@@ -435,6 +436,13 @@
             self.field_ind[self.field_names[-1]] = ifield
         # This all needs to be cleaned up in the deallocator
 
+    def get_domain_boundaries(self):
+        bounds = []
+        for i in range(self.rsnap.m_header.ncpu):
+            bounds.append((self.rsnap.ind_min[i],
+                           self.rsnap.ind_max[i]))
+        return bounds
+
     def __dealloc__(self):
         cdef int idomain, ifield
         # To ensure that 'delete' is used, not 'free',
@@ -504,12 +512,13 @@
         # We delete and re-create
         cdef int varindex = self.field_ind[varname]
         cdef string *field_name = new string(varname)
-        if self.loaded[domain_index][varindex] == 0: return
-        cdef RAMSES_hydro_data *temp_hdata = self.hydro_datas[domain_index][varindex]
-        del temp_hdata
-        self.hydro_datas[domain_index - 1][varindex] = \
-            new RAMSES_hydro_data(deref(self.trees[domain_index]))
-        self.loaded[domain_index][varindex] = 0
+        cdef RAMSES_hydro_data *temp_hdata
+        if self.loaded[domain_index][varindex] == 1:
+            temp_hdata = self.hydro_datas[domain_index][varindex]
+            del temp_hdata
+            self.hydro_datas[domain_index][varindex] = \
+                new RAMSES_hydro_data(deref(self.trees[domain_index]))
+            self.loaded[domain_index][varindex] = 0
         del field_name
 
     def get_file_info(self):


--- a/yt/frontends/ramses/data_structures.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Fri Sep 02 10:20:42 2011 -0400
@@ -65,6 +65,7 @@
         self.Parent = []
         self.Children = []
         self.locations = locations
+        self.domain = locations[0,0]
         self.start_index = start_index.copy()
 
     def _setup_dx(self):
@@ -189,16 +190,20 @@
                         level, unique_indices.size, hilbert_indices.size)
             locs, lefts = _ramses_reader.get_array_indices_lists(
                         hilbert_indices, unique_indices, left_index, fl)
-            for dleft_index, dfl in zip(lefts, locs):
-                initial_left = na.min(dleft_index, axis=0)
-                idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
-                psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
-                                dleft_index, dfl)
-                if psg.efficiency <= 0: continue
-                self.num_deep = 0
-                psgs.extend(_ramses_reader.recursive_patch_splitting(
-                    psg, idims, initial_left, 
-                    dleft_index, dfl))
+            for ddleft_index, ddfl in zip(lefts, locs):
+                for idomain in na.unique(ddfl[:,0]):
+                    dom_ind = ddfl[:,0] == idomain
+                    dleft_index = ddleft_index[dom_ind,:]
+                    dfl = ddfl[dom_ind,:]
+                    initial_left = na.min(dleft_index, axis=0)
+                    idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
+                    psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
+                                    dleft_index, dfl)
+                    if psg.efficiency <= 0: continue
+                    self.num_deep = 0
+                    psgs.extend(_ramses_reader.recursive_patch_splitting(
+                        psg, idims, initial_left, 
+                        dleft_index, dfl))
             mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
@@ -241,8 +246,20 @@
                                 self.grid_levels, mask)
             parents = self.grids[mask.astype("bool")]
             if len(parents) > 0:
-                g.Parent.extend(parents.tolist())
+                g.Parent.extend((p for p in parents.tolist()
+                        if p.locations[0,0] == g.locations[0,0]))
                 for p in parents: p.Children.append(g)
+            # Now we do overlapping siblings; note that one has to "win" with
+            # siblings, so we assume the lower ID one will "win"
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                g.Level,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask, gi)
+            mask[gi] = False
+            siblings = self.grids[mask.astype("bool")]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
             g._prepare_grid()
             g._setup_dx()
         self.max_level = self.grid_levels.max()
@@ -330,6 +347,14 @@
                                            * rheader['boxlen']
         self.domain_left_edge = na.zeros(3, dtype='float64')
         self.domain_dimensions = na.ones(3, dtype='int32') * 2
+        # This is likely not true, but I am not sure how to otherwise
+        # distinguish them.
+        mylog.warning("No current mechanism of distinguishing cosmological simulations in RAMSES!")
+        self.cosmological_simulation = 1
+        self.current_redshift = (1.0 / rheader["aexp"]) - 1.0
+        self.omega_lambda = rheader["omega_l"]
+        self.omega_matter = rheader["omega_m"]
+        self.hubble_constant = rheader["H0"]
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
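The redshift assignment follows directly from the definition of the expansion factor, z = 1/a - 1; for an illustrative header value:

    aexp = 0.25                            # expansion factor from the header
    current_redshift = 1.0 / aexp - 1.0    # = 3.0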


--- a/yt/frontends/ramses/io.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/frontends/ramses/io.py	Fri Sep 02 10:20:42 2011 -0400
@@ -23,10 +23,12 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
+from collections import defaultdict
 import numpy as na
 
 from yt.utilities.io_handler import \
     BaseIOHandler
+from yt.utilities.logger import ytLogger as mylog
 
 class IOHandlerRAMSES(BaseIOHandler):
     _data_style = "ramses"
@@ -58,3 +60,21 @@
         sl[axis] = slice(coord, coord + 1)
         return self._read_data_set(grid, field)[sl]
 
+    def preload(self, grids, sets):
+        if len(grids) == 0: return
+        domain_keys = defaultdict(list)
+        pf_field_list = grids[0].pf.h.field_list
+        sets = [dset for dset in list(sets) if dset in pf_field_list]
+        exc = self._read_exception
+        for g in grids:
+            domain_keys[g.domain].append(g)
+        for domain, grids in domain_keys.items():
+            mylog.debug("Starting read of domain %s (%s)", domain, sets)
+            for field in sets:
+                for g in grids:
+                    self.queue[g.id][field] = self._read_data_set(g, field)
+                print "Clearing", field, domain
+                self.ramses_tree.clear_tree(field, domain - 1)
+        mylog.debug("Finished read of %s", sets)
+
+    def modify(self, data): return data


--- a/yt/funcs.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/funcs.py	Fri Sep 02 10:20:42 2011 -0400
@@ -137,17 +137,9 @@
     return resident * pagesize / (1024 * 1024) # return in megs
 
 def time_execution(func):
-    """
+    r"""
     Decorator for seeing how long a given function takes, depending on whether
     or not the global 'yt.timefunctions' config parameter is set.
-
-    This can be used like so:
-
-    .. code-block:: python
-
-       @time_execution
-    def some_longrunning_function(...):
-
     """
     @wraps(func)
     def wrapper(*arg, **kw):
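The usage example removed from this docstring was mis-indented; the intended pattern, for reference:

    @time_execution
    def some_longrunning_function(*args, **kwargs):
        ...  # timed whenever the 'yt.timefunctions' config parameter is set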


--- a/yt/utilities/_amr_utils/misc_utilities.pyx	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx	Fri Sep 02 10:20:42 2011 -0400
@@ -27,6 +27,9 @@
 cimport numpy as np
 cimport cython
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_color_bounds(np.ndarray[np.float64_t, ndim=1] px,
                      np.ndarray[np.float64_t, ndim=1] py,
                      np.ndarray[np.float64_t, ndim=1] pdx,
@@ -51,28 +54,57 @@
             if v > ma: ma = v
     return (mi, ma)
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_box_grids_level(np.ndarray[np.float64_t, ndim=1] left_edge,
                         np.ndarray[np.float64_t, ndim=1] right_edge,
                         int level,
                         np.ndarray[np.float64_t, ndim=2] left_edges,
                         np.ndarray[np.float64_t, ndim=2] right_edges,
                         np.ndarray[np.int32_t, ndim=2] levels,
+                        np.ndarray[np.int32_t, ndim=1] mask,
+                        int min_index = 0):
+    cdef int i, n
+    cdef int nx = left_edges.shape[0]
+    cdef int inside 
+    for i in range(nx):
+        if i < min_index or levels[i,0] != level:
+            mask[i] = 0
+            continue
+        inside = 1
+        for n in range(3):
+            if left_edge[n] >= right_edges[i,n] or \
+               right_edge[n] <= left_edges[i,n]:
+                inside = 0
+                break
+        if inside == 1: mask[i] = 1
+        else: mask[i] = 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def get_box_grids_below_level(
+                        np.ndarray[np.float64_t, ndim=1] left_edge,
+                        np.ndarray[np.float64_t, ndim=1] right_edge,
+                        int level,
+                        np.ndarray[np.float64_t, ndim=2] left_edges,
+                        np.ndarray[np.float64_t, ndim=2] right_edges,
+                        np.ndarray[np.int32_t, ndim=2] levels,
                         np.ndarray[np.int32_t, ndim=1] mask):
     cdef int i, n
     cdef int nx = left_edges.shape[0]
     cdef int inside 
     for i in range(nx):
-        if levels[i,0] != level:
-            mask[i] = 0
-            continue
-        inside = 1
-        for n in range(3):
-            if left_edge[n] >= right_edges[i,n] or \
-               right_edge[n] <= left_edges[i,n]:
-                inside = 0
-                break
-        if inside == 1: mask[i] = 1
-        else: mask[i] = 0
+        mask[i] = 0
+        if levels[i,0] <= level:
+            inside = 1
+            for n in range(3):
+                if left_edge[n] >= right_edges[i,n] or \
+                   right_edge[n] <= left_edges[i,n]:
+                    inside = 0
+                    break
+            if inside == 1: mask[i] = 1
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
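Callers are expected to hand this kernel a pre-allocated int32 mask, as the hierarchy method added earlier in this series does; a sketch of the calling convention (array names illustrative):

    import numpy as na   # the numpy alias used throughout yt

    mask = na.empty(grid_left_edges.shape[0], dtype="int32")
    get_box_grids_below_level(left_edge, right_edge, level,
                              grid_left_edges, grid_right_edges,
                              grid_levels, mask)   # grid_levels is (N, 1) int32
    selected = grids[mask.astype("bool")]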


--- a/yt/utilities/answer_testing/runner.py	Thu Sep 01 16:07:42 2011 -0400
+++ b/yt/utilities/answer_testing/runner.py	Fri Sep 02 10:20:42 2011 -0400
@@ -129,8 +129,11 @@
         self.plot_list[test.name] = test.plot()
         self.results[test.name] = test.result
         success, msg = self._compare(test)
-        if success == True: print "SUCCEEDED"
-        else: print "FAILED"
+        if self.old_results is None:
+            print "NO OLD RESULTS"
+        else:
+            if success == True: print "SUCCEEDED"
+            else: print "FAILED"
         self.passed_tests[test.name] = success
         if self.watcher is not None:
             if success == True:

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


