[Yt-svn] commit/yt: 3 new changesets

Bitbucket commits-noreply at bitbucket.org
Wed Jun 8 08:55:21 PDT 2011


3 new changesets in yt:

http://bitbucket.org/yt_analysis/yt/changeset/31d50c04258e/
changeset:   31d50c04258e
branch:      yt
user:        MatthewTurk
date:        2011-06-08 04:45:54
summary:     More updates to Ramses; nearly an order-of-magnitude (OOM)
speedup over this morning's version, but some things have ceased working.
affected #:  2 files (3.9 KB)
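
Most of the speedup comes from replacing Python lists of small per-grid
arrays with single typed NumPy buffers that Cython can index without
touching the interpreter. A minimal pure-Python sketch of the layout
change (the values below are made up; the variable names follow the diff):

    import numpy as np

    # Before: a Python list of per-grid info arrays; every element access
    # in the Cython loop goes through the Python object layer.
    grid_info_list = [np.array([1, 0, 2, 0, 0, 0], dtype="int64"),
                      np.array([2, 8, 2, 4, 0, 0], dtype="int64")]

    # After: one contiguous (N, 6) int64 array.  Declared as
    # np.ndarray[np.int64_t, ndim=2] in Cython, component_grid_info[gi, 0]
    # compiles down to a raw memory read.
    component_grid_info = np.array(grid_info_list, dtype="int64")
    for gi in range(component_grid_info.shape[0]):
        domain = component_grid_info[gi, 0]
        offset = component_grid_info[gi, 1]
        og_start_index = component_grid_info[gi, 3:6]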

--- a/yt/frontends/ramses/_ramses_reader.pyx	Tue Jun 07 20:22:57 2011 -0400
+++ b/yt/frontends/ramses/_ramses_reader.pyx	Tue Jun 07 22:45:54 2011 -0400
@@ -654,17 +654,17 @@
                   np.ndarray[np.float64_t, ndim=3] data,
                   np.ndarray[np.int32_t, ndim=3] filled,
                   int level, int ref_factor,
-                  component_grid_info):
+                  np.ndarray[np.int64_t, ndim=2] component_grid_info):
         cdef int varindex = self.field_ind[field]
         cdef RAMSES_tree *local_tree = NULL
         cdef RAMSES_hydro_data *local_hydro_data = NULL
 
         cdef int gi, i, j, k, domain, offset
         cdef int ir, jr, kr
+        cdef int n
         cdef int offi, offj, offk, odind
         cdef np.int64_t di, dj, dk
-        cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
-        cdef np.ndarray[np.int64_t, ndim=1] og_start_index
+        cdef np.int32_t og_start_index[3]
         cdef np.float64_t temp_data
         cdef np.int64_t end_index[3]
         cdef int to_fill = 0
@@ -672,15 +672,13 @@
         #   (k*2 + j)*2 + i
         for i in range(3):
             end_index[i] = start_index[i] + grid_dims[i]
-        for gi in range(len(component_grid_info)):
-            ogrid_info = component_grid_info[gi]
-            domain = ogrid_info[0]
-            #print "Loading", domain, ogrid_info
+        for gi in range(component_grid_info.shape[0]):
+            domain = component_grid_info[gi,0]
             self.ensure_loaded(field, domain - 1)
             local_tree = self.trees[domain - 1]
             local_hydro_data = self.hydro_datas[domain - 1][varindex]
-            offset = ogrid_info[1]
-            og_start_index = ogrid_info[3:]
+            offset = component_grid_info[gi,1]
+            for n in range(3): og_start_index[n] = component_grid_info[gi,3+n]
             for i in range(2*ref_factor):
                 di = i + og_start_index[0] * ref_factor
                 if di < start_index[0] or di >= end_index[0]: continue
@@ -709,22 +707,6 @@
                         to_fill += 1
         return to_fill
 
-#def recursive_patch_splitting(ProtoSubgrid psg,
-#        np.ndarray[np.int64_t, ndim=1] dims,
-#        np.ndarray[np.int64_t, ndim=1] inds,
-#        np.ndarray[np.int64_t, ndim=2] left_index,
-#        np.ndarray[np.int64_t, ndim=2] right_index,
-#        np.ndarray[np.int64_t, ndim=2] gdims,
-#        np.ndarray[np.int64_t, ndim=2] fl,
-#        int num_deep = 0):
-#    cdef float min_eff = 0.1
-#    if num_deep > 40:
-#        psg.efficiency = min_eff
-#        return [psg]
-#    if psg.efficiency > min_eff or psg.efficiency < 0.0:
-#        return [psg]
-#    cdef 
-#
 cdef class ProtoSubgrid:
     cdef np.int64_t *signature[3]
     cdef np.int64_t left_edge[3]
@@ -784,10 +766,10 @@
         sig2 = self.sigs[2]
         efficiency = 0.0
         cdef int used
-        self.grid_file_locations = []
+        cdef np.ndarray[np.int32_t, ndim=1] mask
+        mask = np.zeros(ng, 'int32')
+        used = 0
         for gi in range(ng):
-            used = 0
-            nnn = 0
             for l0 in range(2):
                 i0 = left_edges[gi, 0] + l0
                 if i0 < self.left_edge[0]: continue
@@ -807,12 +789,19 @@
                         i = i2 - self.left_edge[2]
                         sig2[i] += 1
                         efficiency += 1
-                        used = 1
-            if used == 1:
+                        used += 1
+                        mask[gi] = 1
+        cdef np.ndarray[np.int64_t, ndim=2] gfl
+        gfl = np.zeros((used, 6), 'int64')
+        used = 0
+        self.grid_file_locations = gfl
+        for gi in range(ng):
+            if mask[gi] == 1:
                 grid_file_locations[gi,3] = left_edges[gi, 0]
                 grid_file_locations[gi,4] = left_edges[gi, 1]
                 grid_file_locations[gi,5] = left_edges[gi, 2]
-                self.grid_file_locations.append(grid_file_locations[gi,:])
+                for i in range(6):
+                    gfl[used, i] = grid_file_locations[gi,i]
          
         self.dd = np.ones(3, dtype='int64')
         for i in range(3):
@@ -828,7 +817,7 @@
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
-    def find_split(self):
+    cdef void find_split(self, int *tr):
         # First look for zeros
         cdef int i, center, ax
         cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
@@ -842,7 +831,8 @@
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
-                    return 0, ax, i
+                    tr[0] = 0; tr[1] = ax; tr[2] = i
+                    return
         zcstrength = 0
         zcp = 0
         zca = -1
@@ -866,7 +856,8 @@
                         zca = ax
             free(sig2d)
         #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
-        return 1, ax, zcp
+        tr[0] = 1; tr[1] = ax; tr[2] = zcp
+        return
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
@@ -970,7 +961,9 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def get_array_indices_lists(np.ndarray[np.int64_t, ndim=1] ind,
-                            np.ndarray[np.int64_t, ndim=1] uind):
+                            np.ndarray[np.int64_t, ndim=1] uind,
+                            np.ndarray[np.int64_t, ndim=2] lefts,
+                            np.ndarray[np.int64_t, ndim=2] files):
     cdef np.ndarray[np.int64_t, ndim=1] count = np.zeros(uind.shape[0], 'int64')
     cdef int n, i
     cdef np.int64_t mi, mui
@@ -980,23 +973,89 @@
             if uind[n] == mi:
                 count[n] += 1
                 break
-    cdef np.int64_t **inds
-    inds = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
+    cdef np.int64_t **alefts
+    cdef np.int64_t **afiles
+    afiles = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
+    alefts = <np.int64_t **> malloc(sizeof(np.int64_t *) * uind.shape[0])
     cdef int *li = <int *> malloc(sizeof(int) * uind.shape[0])
-    cdef np.ndarray[np.int64_t, ndim=1] indices
-    all_indices = []
+    cdef np.ndarray[np.int64_t, ndim=2] locations
+    cdef np.ndarray[np.int64_t, ndim=2] left
+    all_locations = []
+    all_lefts = []
     for n in range(uind.shape[0]):
-        indices = np.zeros(count[n], 'int64')
-        all_indices.append(indices)
-        inds[n] = <np.int64_t *> indices.data
+        locations = np.zeros((count[n], 6), 'int64')
+        left = np.zeros((count[n], 3), 'int64')
+        all_locations.append(locations)
+        all_lefts.append(left)
+        afiles[n] = <np.int64_t *> locations.data
+        alefts[n] = <np.int64_t *> left.data
         li[n] = 0
+    cdef int fi
     for i in range(ind.shape[0]):
         mi = ind[i]
         for n in range(uind.shape[0]):
             if uind[n] == mi:
-                inds[n][li[n]] = i
+                for fi in range(3):
+                    alefts[n][li[n] * 3 + fi] = lefts[i, fi]
+                for fi in range(6):
+                    afiles[n][li[n] * 6 + fi] = files[i, fi]
                 li[n] += 1
                 break
-    free(inds) # not inds[...]
-    free(li)
-    return all_indices
+    free(afiles)
+    free(alefts)
+    return all_locations, all_lefts
+
+def recursive_patch_splitting(ProtoSubgrid psg,
+        np.ndarray[np.int64_t, ndim=1] dims,
+        np.ndarray[np.int64_t, ndim=1] ind,
+        np.ndarray[np.int64_t, ndim=2] left_index,
+        np.ndarray[np.int64_t, ndim=2] fl,
+        int num_deep = 0):
+    cdef float min_eff = 0.1
+    cdef ProtoSubgrid L, R
+    cdef np.ndarray[np.int64_t, ndim=1] dims_l, li_l
+    cdef np.ndarray[np.int64_t, ndim=1] dims_r, li_r
+    cdef int tt, ax, fp, i, j, k, gi
+    cdef int tr[3]
+    if num_deep > 40:
+        psg.efficiency = min_eff
+        return [psg]
+    if psg.efficiency > min_eff or psg.efficiency < 0.0:
+        return [psg]
+    psg.find_split(tr)
+    tt = tr[0]
+    ax = tr[1]
+    fp = tr[2]
+    if (fp % 2) != 0:
+        if dims[ax] != fp + 1:
+            fp += 1
+        else:
+            fp -= 1
+    dims_l = dims.copy()
+    dims_l[ax] = fp
+    li_l = ind.copy()
+    for i in range(3):
+        if dims_l[i] <= 0: return [psg]
+    dims_r = dims.copy()
+    dims_r[ax] -= fp
+    li_r = ind.copy()
+    li_r[ax] += fp
+    for i in range(3):
+        if dims_r[i] <= 0: return [psg]
+    L = ProtoSubgrid(li_l, dims_l, left_index, fl)
+    if L.efficiency > 1.0: raise RuntimeError
+    if L.efficiency <= 0.0: rv_l = []
+    elif L.efficiency < min_eff:
+        rv_l = recursive_patch_splitting(L, dims_l, li_l,
+                left_index, fl, num_deep + 1)
+    else:
+        rv_l = [L]
+    R = ProtoSubgrid(li_r, dims_r, left_index, fl)
+    if R.efficiency > 1.0: raise RuntimeError
+    if R.efficiency <= 0.0: rv_r = []
+    elif R.efficiency < min_eff:
+        rv_r = recursive_patch_splitting(R, dims_r, li_r,
+                left_index, fl, num_deep + 1)
+    else:
+        rv_r = [R]
+    return rv_r + rv_l


--- a/yt/frontends/ramses/data_structures.py	Tue Jun 07 20:22:57 2011 -0400
+++ b/yt/frontends/ramses/data_structures.py	Tue Jun 07 22:45:54 2011 -0400
@@ -181,29 +181,24 @@
             # Strictly speaking, we don't care about the index of any
             # individual oct at this point.  So we can then split them up.
             unique_indices = na.unique(hilbert_indices)
-            print "Level % 2i has % 10i unique indices for %0.3e octs" % (
+            mylog.debug("Level % 2i has % 10i unique indices for %0.3e octs",
                         level, unique_indices.size, hilbert_indices.size)
-            all_indices = _ramses_reader.get_array_indices_lists(
-                        hilbert_indices, unique_indices)
-            for curve_index, my_octs in zip(unique_indices, all_indices):
-                #print "Handling", curve_index
-                #my_octs = (hilbert_indices == curve_index)
-                dleft_index = left_index[my_octs,:]
-                dfl = fl[my_octs,:]
+            locs, lefts = _ramses_reader.get_array_indices_lists(
+                        hilbert_indices, unique_indices, left_index, fl)
+            for dleft_index, dfl in zip(lefts, locs):
                 initial_left = na.min(dleft_index, axis=0)
                 idims = (na.max(dleft_index, axis=0) - initial_left).ravel()+2
-                #if level > 10: insert_ipython()
-                #print initial_left, idims
                 psg = _ramses_reader.ProtoSubgrid(initial_left, idims,
                                 dleft_index, dfl)
                 if psg.efficiency <= 0: continue
                 self.num_deep = 0
-                psgs.extend(self._recursive_patch_splitting(
+                psgs.extend(_ramses_reader.recursive_patch_splitting(
                     psg, idims, initial_left, 
                     dleft_index, dfl))
-            print "Done with level % 2i" % (level)
+            mylog.debug("Done with level % 2i", level)
             pbar.finish()
             self.proto_grids.append(psgs)
+            print sum(len(psg.grid_file_locations) for psg in psgs)
             sums = na.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
@@ -212,56 +207,6 @@
             #assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
-    num_deep = 0
-
-    @num_deep_inc
-    def _recursive_patch_splitting(self, psg, dims, ind,
-            left_index, fl):
-        min_eff = 0.1 # This isn't always respected.
-        if self.num_deep > 40:
-            # If we've recursed more than 100 times, we give up.
-            psg.efficiency = min_eff
-            return [psg]
-        if psg.efficiency > min_eff or psg.efficiency < 0.0:
-            return [psg]
-        tt, ax, fp = psg.find_split()
-        if (fp % 2) != 0:
-            if dims[ax] != fp + 1:
-                fp += 1
-            else:
-                fp -= 1
-        #print " " * self.num_deep + "Got ax", ax, "fp", fp
-        dims_l = dims.copy()
-        dims_l[ax] = fp
-        li_l = ind.copy()
-        if na.any(dims_l <= 0): return [psg]
-        L = _ramses_reader.ProtoSubgrid(
-                li_l, dims_l, left_index, fl)
-        #print " " * self.num_deep + "L", tt, L.efficiency
-        if L.efficiency > 1.0: raise RuntimeError
-        if L.efficiency <= 0.0: L = []
-        elif L.efficiency < min_eff:
-            L = self._recursive_patch_splitting(L, dims_l, li_l,
-                    left_index, fl)
-        else:
-            L = [L]
-        dims_r = dims.copy()
-        dims_r[ax] -= fp
-        li_r = ind.copy()
-        li_r[ax] += fp
-        if na.any(dims_r <= 0): return [psg]
-        R = _ramses_reader.ProtoSubgrid(
-                li_r, dims_r, left_index, fl)
-        #print " " * self.num_deep + "R", tt, R.efficiency
-        if R.efficiency > 1.0: raise RuntimeError
-        if R.efficiency <= 0.0: R = []
-        elif R.efficiency < min_eff:
-            R = self._recursive_patch_splitting(R, dims_r, li_r,
-                    left_index, fl)
-        else:
-            R = [R]
-        return L + R
-        
     def _parse_hierarchy(self):
         # We have important work to do
         grids = []
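
The grouping that moved into get_array_indices_lists is, conceptually, a
bucket-by-unique-value pass over the Hilbert indices; the removed Python
loop did the same thing with boolean masks. A NumPy sketch with made-up
values:

    import numpy as np

    hilbert_indices = np.array([5, 3, 5, 5, 3], dtype="int64")
    unique_indices = np.unique(hilbert_indices)
    # One group of oct row-indices per unique curve index, equivalent to
    # the removed my_octs = (hilbert_indices == curve_index) masks.
    groups = [np.where(hilbert_indices == u)[0] for u in unique_indices]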


http://bitbucket.org/yt_analysis/yt/changeset/6f2f4b966116/
changeset:   6f2f4b966116
branch:      yt
user:        MatthewTurk
date:        2011-06-08 17:54:22
summary:     Backing out changeset f9c54e658b4c, which broke parallel extrema calculations.
affected #:  1 file (126 bytes)
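
The restored two-transpose pattern assumes _mpi_catarray gathers along the
leading axis across MPI ranks. A small NumPy sketch of that shape handling
(np.concatenate stands in for _mpi_catarray here, and the values are made
up):

    import numpy as np

    # Per-rank results after the first transpose: (n_local, n_quantities).
    rank0 = np.array([[1.0, 10.0], [2.0, 20.0]])   # two local values
    rank1 = np.array([[3.0, 30.0]])                # one local value

    gathered = np.concatenate([rank0, rank1])      # (3, 2) across "ranks"
    rv = gathered.transpose()                      # (2, 3), quantity-major
    assert rv.shape == (2, 3)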

--- a/yt/data_objects/derived_quantities.py	Tue Jun 07 22:45:54 2011 -0400
+++ b/yt/data_objects/derived_quantities.py	Wed Jun 08 11:54:22 2011 -0400
@@ -109,10 +109,7 @@
         rv = []
         for my_list in self.retvals:
             data = na.array(my_list).transpose()
-            old_shape = data.shape
-            data = self._mpi_catarray(data).transpose()
-            if len(data.shape) != old_shape: data = data.squeeze()
-            rv.append(data)
+            rv.append(self._mpi_catarray(data).transpose())
         self.retvals = rv
         
     def _call_func_unlazy(self, args, kwargs):


http://bitbucket.org/yt_analysis/yt/changeset/a99229151aed/
changeset:   a99229151aed
branch:      yt
user:        MatthewTurk
date:        2011-06-08 17:55:03
summary:     Merging
affected #:  11 files (12.6 KB)

--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py	Wed Jun 08 11:55:03 2011 -0400
@@ -566,7 +566,7 @@
                                                     antialias=False)
                         dataset_name = "%s_%s" % (hp['field'], hp['weight_field'])
                         if save_cube:
-                            if dataset_name in output.listnames(): del output[dataset_name]
+                            if dataset_name in output: del output[dataset_name]
                             output.create_dataset(dataset_name, data=frb[hp['field']])
                         if save_images:
                             filename = "%s/Halo_%04d_%s_%s.png" % (my_output_dir, halo['id'], 


--- a/yt/analysis_modules/light_cone/light_cone.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/analysis_modules/light_cone/light_cone.py	Wed Jun 08 11:55:03 2011 -0400
@@ -520,7 +520,7 @@
 
         output = h5py.File(filename, "a")
 
-        node_exists = field_node in output.listnames()
+        node_exists = field_node in output
 
         if node_exists:
             if over_write:


--- a/yt/data_objects/hierarchy.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/data_objects/hierarchy.py	Wed Jun 08 11:55:03 2011 -0400
@@ -174,8 +174,12 @@
         self._barrier()
         if not writeable and not exists: return
         if writeable:
-            self._data_mode = 'a'
-            if not exists: self.__create_data_file(fn)
+            try:
+                if not exists: self.__create_data_file(fn)
+                self._data_mode = 'a'
+            except IOError:
+                self._data_mode = None
+                return
         else:
             self._data_mode = 'r'
 
@@ -200,15 +204,15 @@
 
         if self._data_mode != 'a': return
         if "ArgsError" in dir(h5py.h5):
-            exception = h5py.h5.ArgsError
+            exception = (h5py.h5.ArgsError, KeyError)
         else:
-            exception = h5py.h5.H5Error
+            exception = (h5py.h5.H5Error, KeyError)
         try:
             node_loc = self._data_file[node]
-            if name in node_loc.listnames() and force:
+            if name in node_loc and force:
                 mylog.info("Overwriting node %s/%s", node, name)
                 del self._data_file[node][name]
-            elif name in node_loc.listnames() and passthrough:
+            elif name in node_loc and passthrough:
                 return
         except exception:
             pass
@@ -268,10 +272,10 @@
         myGroup = self._data_file['/']
         for group in node.split('/'):
             if group:
-                if group not in myGroup.listnames():
+                if group not in myGroup:
                     return None
                 myGroup = myGroup[group]
-        if name not in myGroup.listnames():
+        if name not in myGroup:
             return None
 
         full_name = "%s/%s" % (node, name)


--- a/yt/frontends/castro/data_structures.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/frontends/castro/data_structures.py	Wed Jun 08 11:55:03 2011 -0400
@@ -352,8 +352,8 @@
             g._particle_offset = pg[2]
         self.grid_particle_count[:,0] = self.pgrid_info[:,1]
         del self.pgrid_info
-        self.grid_levels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
+        gls = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels[:] = gls.reshape((self.num_grids,1))
         grid_dcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels], axis=0)
         self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
         self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))


--- a/yt/frontends/chombo/data_structures.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/frontends/chombo/data_structures.py	Wed Jun 08 11:55:03 2011 -0400
@@ -102,7 +102,7 @@
         self._fhandle = h5py.File(self.hierarchy_filename)
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = self._fhandle.listnames()[1:]
+        self._levels = [fn for fn in self._fhandle if fn != "Chombo_global"]
         AMRHierarchy.__init__(self,pf,data_style)
 
         self._fhandle.close()
@@ -129,7 +129,7 @@
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
-        levels = f.listnames()[1:]
+        levels = [fn for fn in f if fn != "Chombo_global"]
         self.grids = []
         i = 0
         for lev in levels:
@@ -301,8 +301,7 @@
     def _is_valid(self, *args, **kwargs):
         try:
             fileh = h5py.File(args[0],'r')
-            if (fileh.listnames())[0] == 'Chombo_global':
-                return True
+            return "Chombo_global" in fileh["/"]
         except:
             pass
         return False


--- a/yt/utilities/_amr_utils/kdtree.c	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/utilities/_amr_utils/kdtree.c	Wed Jun 08 11:55:03 2011 -0400
@@ -50,11 +50,11 @@
 
 struct kdhyperrect {
 	int dim;
-	double *min, *max;              /* minimum/maximum coords */
+	npy_float64 *min, *max;              /* minimum/maximum coords */
 };
 
 struct kdnode {
-	double *pos;
+	npy_float64 *pos;
 	int dir;
 	void *data;
 
@@ -63,7 +63,7 @@
 
 struct res_node {
 	struct kdnode *item;
-	double dist_sq;
+	npy_float64 dist_sq;
 	struct res_node *next;
 };
 
@@ -84,15 +84,15 @@
 
 
 static void clear_rec(struct kdnode *node, void (*destr)(void*));
-static int insert_rec(struct kdnode **node, const double *pos, void *data, int dir, int dim);
-static int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq);
+static int insert_rec(struct kdnode **node, const npy_float64 *pos, void *data, int dir, int dim);
+static int rlist_insert(struct res_node *list, struct kdnode *item, npy_float64 dist_sq);
 static void clear_results(struct kdres *set);
 
-static struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max);
+static struct kdhyperrect* hyperrect_create(int dim, const npy_float64 *min, const npy_float64 *max);
 static void hyperrect_free(struct kdhyperrect *rect);
 static struct kdhyperrect* hyperrect_duplicate(const struct kdhyperrect *rect);
-static void hyperrect_extend(struct kdhyperrect *rect, const double *pos);
-static double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos);
+static void hyperrect_extend(struct kdhyperrect *rect, const npy_float64 *pos);
+static npy_float64 hyperrect_dist_sq(struct kdhyperrect *rect, const npy_float64 *pos);
 
 #ifdef USE_LIST_NODE_ALLOCATOR
 static struct res_node *alloc_resnode(void);
@@ -159,7 +159,7 @@
 }
 
 
-static int insert_rec(struct kdnode **nptr, const double *pos, void *data, int dir, int dim)
+static int insert_rec(struct kdnode **nptr, const npy_float64 *pos, void *data, int dir, int dim)
 {
 	int new_dir;
 	struct kdnode *node;
@@ -188,7 +188,7 @@
 	return insert_rec(&(*nptr)->right, pos, data, new_dir, dim);
 }
 
-int kd_insert(struct kdtree *tree, const double *pos, void *data)
+int kd_insert(struct kdtree *tree, const npy_float64 *pos, void *data)
 {
 	if (insert_rec(&tree->root, pos, data, 0, tree->dim)) {
 		return -1;
@@ -205,8 +205,8 @@
 
 int kd_insertf(struct kdtree *tree, const float *pos, void *data)
 {
-	static double sbuf[16];
-	double *bptr, *buf = 0;
+	static npy_float64 sbuf[16];
+	npy_float64 *bptr, *buf = 0;
 	int res, dim = tree->dim;
 
 	if(dim > 16) {
@@ -236,9 +236,9 @@
 	return res;
 }
 
-int kd_insert3(struct kdtree *tree, double x, double y, double z, void *data)
+int kd_insert3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z, void *data)
 {
-	double buf[3];
+	npy_float64 buf[3];
 	buf[0] = x;
 	buf[1] = y;
 	buf[2] = z;
@@ -247,16 +247,16 @@
 
 int kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data)
 {
-	double buf[3];
+	npy_float64 buf[3];
 	buf[0] = x;
 	buf[1] = y;
 	buf[2] = z;
 	return kd_insert(tree, buf, data);
 }
 
-static int find_nearest(struct kdnode *node, const double *pos, double range, struct res_node *list, int ordered, int dim)
+static int find_nearest(struct kdnode *node, const npy_float64 *pos, npy_float64 range, struct res_node *list, int ordered, int dim)
 {
-	double dist_sq, dx;
+	npy_float64 dist_sq, dx;
 	int i, ret, added_res = 0;
 
 	if(!node) return 0;
@@ -287,13 +287,13 @@
 	return added_res;
 }
 
-static void kd_nearest_i(struct kdnode *node, const double *pos, struct kdnode **result, double *result_dist_sq, struct kdhyperrect* rect)
+static void kd_nearest_i(struct kdnode *node, const npy_float64 *pos, struct kdnode **result, npy_float64 *result_dist_sq, struct kdhyperrect* rect)
 {
 	int dir = node->dir;
 	int i, side;
-	double dummy, dist_sq;
+	npy_float64 dummy, dist_sq;
 	struct kdnode *nearer_subtree, *farther_subtree;
-	double *nearer_hyperrect_coord, *farther_hyperrect_coord;
+	npy_float64 *nearer_hyperrect_coord, *farther_hyperrect_coord;
 
 	/* Decide whether to go left or right in the tree */
 	dummy = pos[dir] - node->pos[dir];
@@ -348,12 +348,12 @@
 	}
 }
 
-struct kdres *kd_nearest(struct kdtree *kd, const double *pos)
+struct kdres *kd_nearest(struct kdtree *kd, const npy_float64 *pos)
 {
 	struct kdhyperrect *rect;
 	struct kdnode *result;
 	struct kdres *rset;
-	double dist_sq;
+	npy_float64 dist_sq;
 	int i;
 
 	if (!kd) return 0;
@@ -405,8 +405,8 @@
 
 struct kdres *kd_nearestf(struct kdtree *tree, const float *pos)
 {
-	static double sbuf[16];
-	double *bptr, *buf = 0;
+	static npy_float64 sbuf[16];
+	npy_float64 *bptr, *buf = 0;
 	int dim = tree->dim;
 	struct kdres *res;
 
@@ -437,9 +437,9 @@
 	return res;
 }
 
-struct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z)
+struct kdres *kd_nearest3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z)
 {
-	double pos[3];
+	npy_float64 pos[3];
 	pos[0] = x;
 	pos[1] = y;
 	pos[2] = z;
@@ -448,14 +448,14 @@
 
 struct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z)
 {
-	double pos[3];
+	npy_float64 pos[3];
 	pos[0] = x;
 	pos[1] = y;
 	pos[2] = z;
 	return kd_nearest(tree, pos);
 }
 
-struct kdres *kd_nearest_range(struct kdtree *kd, const double *pos, double range)
+struct kdres *kd_nearest_range(struct kdtree *kd, const npy_float64 *pos, npy_float64 range)
 {
 	int ret;
 	struct kdres *rset;
@@ -481,8 +481,8 @@
 
 struct kdres *kd_nearest_rangef(struct kdtree *kd, const float *pos, float range)
 {
-	static double sbuf[16];
-	double *bptr, *buf = 0;
+	static npy_float64 sbuf[16];
+	npy_float64 *bptr, *buf = 0;
 	int dim = kd->dim;
 	struct kdres *res;
 
@@ -513,9 +513,9 @@
 	return res;
 }
 
-struct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range)
+struct kdres *kd_nearest_range3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z, npy_float64 range)
 {
-	double buf[3];
+	npy_float64 buf[3];
 	buf[0] = x;
 	buf[1] = y;
 	buf[2] = z;
@@ -524,7 +524,7 @@
 
 struct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range)
 {
-	double buf[3];
+	npy_float64 buf[3];
 	buf[0] = x;
 	buf[1] = y;
 	buf[2] = z;
@@ -559,7 +559,7 @@
 	return rset->riter != 0;
 }
 
-void *kd_res_item(struct kdres *rset, double *pos)
+void *kd_res_item(struct kdres *rset, npy_float64 *pos)
 {
 	if(rset->riter) {
 		if(pos) {
@@ -584,7 +584,7 @@
 	return 0;
 }
 
-void *kd_res_item3(struct kdres *rset, double *x, double *y, double *z)
+void *kd_res_item3(struct kdres *rset, npy_float64 *x, npy_float64 *y, npy_float64 *z)
 {
 	if(rset->riter) {
 		if(*x) *x = rset->riter->item->pos[0];
@@ -610,9 +610,9 @@
 }
 
 /* ---- hyperrectangle helpers ---- */
-static struct kdhyperrect* hyperrect_create(int dim, const double *min, const double *max)
+static struct kdhyperrect* hyperrect_create(int dim, const npy_float64 *min, const npy_float64 *max)
 {
-	size_t size = dim * sizeof(double);
+	size_t size = dim * sizeof(npy_float64);
 	struct kdhyperrect* rect = 0;
 
 	if (!(rect = malloc(sizeof(struct kdhyperrect)))) {
@@ -647,7 +647,7 @@
 	return hyperrect_create(rect->dim, rect->min, rect->max);
 }
 
-static void hyperrect_extend(struct kdhyperrect *rect, const double *pos)
+static void hyperrect_extend(struct kdhyperrect *rect, const npy_float64 *pos)
 {
 	int i;
 
@@ -661,10 +661,10 @@
 	}
 }
 
-static double hyperrect_dist_sq(struct kdhyperrect *rect, const double *pos)
+static npy_float64 hyperrect_dist_sq(struct kdhyperrect *rect, const npy_float64 *pos)
 {
 	int i;
-	double result = 0;
+	npy_float64 result = 0;
 
 	for (i=0; i < rect->dim; i++) {
 		if (pos[i] < rect->min[i]) {
@@ -727,7 +727,7 @@
 
 
 /* inserts the item. if dist_sq is >= 0, then do an ordered insert */
-static int rlist_insert(struct res_node *list, struct kdnode *item, double dist_sq)
+static int rlist_insert(struct res_node *list, struct kdnode *item, npy_float64 dist_sq)
 {
 	struct res_node *rnode;
 


--- a/yt/utilities/_amr_utils/kdtree.h	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/utilities/_amr_utils/kdtree.h	Wed Jun 08 11:55:03 2011 -0400
@@ -26,6 +26,8 @@
 */
 #ifndef _KDTREE_H_
 #define _KDTREE_H_
+#include "Python.h"
+#include "numpy/ndarrayobject.h"
 
 #ifdef __cplusplus
 extern "C" {
@@ -51,18 +53,18 @@
 void kd_data_destructor(struct kdtree *tree, void (*destr)(void*));
 
 /* insert a node, specifying its position, and optional data */
-int kd_insert(struct kdtree *tree, const double *pos, void *data);
+int kd_insert(struct kdtree *tree, const npy_float64 *pos, void *data);
 int kd_insertf(struct kdtree *tree, const float *pos, void *data);
-int kd_insert3(struct kdtree *tree, double x, double y, double z, void *data);
+int kd_insert3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z, void *data);
 int kd_insert3f(struct kdtree *tree, float x, float y, float z, void *data);
 
 /* Find one of the nearest nodes from the specified point.
  *
  * This function returns a pointer to a result set with at most one element.
  */
-struct kdres *kd_nearest(struct kdtree *tree, const double *pos);
+struct kdres *kd_nearest(struct kdtree *tree, const npy_float64 *pos);
 struct kdres *kd_nearestf(struct kdtree *tree, const float *pos);
-struct kdres *kd_nearest3(struct kdtree *tree, double x, double y, double z);
+struct kdres *kd_nearest3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z);
 struct kdres *kd_nearest3f(struct kdtree *tree, float x, float y, float z);
 
 /* Find any nearest nodes from the specified point within a range.
@@ -73,9 +75,9 @@
  * a valid result set is always returned which may contain 0 or more elements.
  * The result set must be deallocated with kd_res_free, after use.
  */
-struct kdres *kd_nearest_range(struct kdtree *tree, const double *pos, double range);
+struct kdres *kd_nearest_range(struct kdtree *tree, const npy_float64 *pos, npy_float64 range);
 struct kdres *kd_nearest_rangef(struct kdtree *tree, const float *pos, float range);
-struct kdres *kd_nearest_range3(struct kdtree *tree, double x, double y, double z, double range);
+struct kdres *kd_nearest_range3(struct kdtree *tree, npy_float64 x, npy_float64 y, npy_float64 z, npy_float64 range);
 struct kdres *kd_nearest_range3f(struct kdtree *tree, float x, float y, float z, float range);
 
 /* frees a result set returned by kd_nearest_range() */
@@ -98,9 +100,9 @@
 /* returns the data pointer (can be null) of the current result set item
  * and optionally sets its position to the pointers(s) if not null.
  */
-void *kd_res_item(struct kdres *set, double *pos);
+void *kd_res_item(struct kdres *set, npy_float64 *pos);
 void *kd_res_itemf(struct kdres *set, float *pos);
-void *kd_res_item3(struct kdres *set, double *x, double *y, double *z);
+void *kd_res_item3(struct kdres *set, npy_float64 *x, npy_float64 *y, npy_float64 *z);
 void *kd_res_item3f(struct kdres *set, float *x, float *y, float *z);
 
 /* equivalent to kd_res_item(set, 0) */


--- a/yt/utilities/_amr_utils/kdtree_utils.pxd	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/utilities/_amr_utils/kdtree_utils.pxd	Wed Jun 08 11:55:03 2011 -0400
@@ -34,18 +34,18 @@
     kdtree *kd_create(int k)
     void kd_free(kdtree *tree)
     
-    int kd_insert3(kdtree *tree, double x, double y, double z, void *data)
-    kdres *kd_nearest3(kdtree *tree, double x, double y, double z)
+    int kd_insert3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z, void *data)
+    kdres *kd_nearest3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z)
 
-    kdres *kd_nearest_range3(kdtree *tree, double x, double y, double z,
-                             double range)
+    kdres *kd_nearest_range3(kdtree *tree, np.float64_t x, np.float64_t y, np.float64_t z,
+                             np.float64_t range)
 
     void kd_res_free(kdres *set)
     int kd_res_size(kdres *set)
     int kd_res_next(kdres *set)
     void kd_res_rewind(kdres *set)
 
-    void kd_res_item3(kdres *set, double *x, double *y, double *z)
+    void kd_res_item3(kdres *set, np.float64_t *x, np.float64_t *y, np.float64_t *z)
     void *kd_res_item_data(kdres *set)
 
     void kd_data_destructor(kdtree *tree, void (*destr)(void*))
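
The double -> npy_float64 substitutions in kdtree.c and kdtree.h, together
with the np.float64_t declarations above, are type plumbing: npy_float64
is NumPy's fixed 64-bit float, so the kd-tree now states explicitly that
it shares memory layout with np.float64 buffers. A quick check of that
assumption from Python:

    import numpy as np

    # npy_float64 is guaranteed 64 bits wide; on mainstream platforms it
    # aliases C double, so the substitution is behavior-preserving.
    pos = np.zeros(3, dtype=np.float64)
    assert pos.dtype.itemsize == 8
    assert np.dtype(np.float64).kind == "f"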


--- a/yt/utilities/command_line.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/utilities/command_line.py	Wed Jun 08 11:55:03 2011 -0400
@@ -635,12 +635,60 @@
         pp = PostInventory()
         pp.add_post(arg, desc=opts.desc)
 
+    @cmdln.option("-l", "--language", action="store",
+                  default = None, dest="language",
+                  help="Use syntax highlighter for the file in language")
+    @cmdln.option("-L", "--languages", action="store_true",
+                  default = False, dest="languages",
+                  help="Retrive a list of supported languages")
+    @cmdln.option("-e", "--encoding", action="store",
+                  default = 'utf-8', dest="encoding",
+                  help="Specify the encoding of a file (default is "
+                        "utf-8 or guessing if available)")
+    @cmdln.option("-b", "--open-browser", action="store_true",
+                  default = False, dest="open_browser",
+                  help="Open the paste in a web browser")
+    @cmdln.option("-p", "--private", action="store_true",
+                  default = False, dest="private",
+                  help="Paste as private")
+    @cmdln.option("-c", "--clipboard", action="store_true",
+                  default = False, dest="clipboard",
+                  help="File to output to; else, print.")
+    def do_pastebin(self, subcmd, opts, arg):
+        """
+        Post a script to an anonymous pastebin.
+
+        Usage: yt pastebin [options] <script>
+
+        ${cmd_option_list}
+        """
+        import yt.utilities.lodgeit as lo
+        lo.main( arg, languages=opts.languages, language=opts.language,
+                 encoding=opts.encoding, open_browser=opts.open_browser,
+                 private=opts.private, clipboard=opts.clipboard)
+
+    def do_pastebin_grab(self, subcmd, opts, arg):
+        """
+        Print an online pastebin to STDOUT for local use.  The paste ID is
+        the number at the end of the URL, e.g. 1688 for
+        http://paste.enzotools.org/show/1688/
+
+        Usage: yt pastebin_grab <Paste ID> 
+        Ex: yt pastebin_grab 1688 > script.py
+
+        """
+        import yt.utilities.lodgeit as lo
+        lo.main( None, download=arg )
+
     @cmdln.option("-o", "--output", action="store",
                   default = None, dest="output_fn",
                   help="File to output to; else, print.")
-    def do_pastegrab(self, subcmd, opts, username, paste_id):
+    def do_pasteboard_grab(self, subcmd, opts, username, paste_id):
         """
         Download from your or another user's pasteboard.
+
+        ${cmd_usage} 
+        ${cmd_option_list}
         """
         from yt.utilities.pasteboard import retrieve_pastefile
         retrieve_pastefile(username, paste_id, opts.output_fn)
@@ -648,6 +696,9 @@
     def do_bugreport(self, subcmd, opts):
         """
         Report a bug in yt
+
+        ${cmd_usage} 
+        ${cmd_option_list}
         """
         print "==============================================================="
         print
@@ -758,6 +809,9 @@
     def do_bootstrap_dev(self, subcmd, opts):
         """
         Bootstrap a yt development environment
+
+        ${cmd_usage} 
+        ${cmd_option_list}
         """
         from mercurial import hg, ui, commands
         import imp


--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/yt/utilities/lodgeit.py	Wed Jun 08 11:55:03 2011 -0400
@@ -0,0 +1,317 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+    LodgeIt!
+    ~~~~~~~~
+
+    A script that pastes stuff into the enzotools pastebin on
+    paste.enzotools.org.
+
+    Modified (very, very slightly) from the original script by the authors
+    below.
+
+    .lodgeitrc / _lodgeitrc
+    -----------------------
+
+    Under UNIX create a file called ``~/.lodgeitrc``, under Windows
+    create a file ``%APPDATA%/_lodgeitrc`` to override defaults::
+
+        language=default_language
+        clipboard=true/false
+        open_browser=true/false
+        encoding=fallback_charset
+
+    :authors: 2007-2008 Georg Brandl <georg at python.org>,
+              2006 Armin Ronacher <armin.ronacher at active-4.com>,
+              2006 Matt Good <matt at matt-good.net>,
+              2005 Raphael Slinckx <raphael at slinckx.net>
+"""
+import os
+import sys
+from optparse import OptionParser
+
+
+SCRIPT_NAME = os.path.basename(sys.argv[0])
+VERSION = '0.3'
+SERVICE_URL = 'http://paste.enzotools.org/'
+SETTING_KEYS = ['author', 'title', 'language', 'private', 'clipboard',
+                'open_browser']
+
+# global server proxy
+_xmlrpc_service = None
+
+
+def fail(msg, code):
+    """Bail out with an error message."""
+    print >> sys.stderr, 'ERROR: %s' % msg
+    sys.exit(code)
+
+
+def load_default_settings():
+    """Load the defaults from the lodgeitrc file."""
+    settings = {
+        'language':     None,
+        'clipboard':    True,
+        'open_browser': False,
+        'encoding':     'iso-8859-15'
+    }
+    rcfile = None
+    if os.name == 'posix':
+        rcfile = os.path.expanduser('~/.lodgeitrc')
+    elif os.name == 'nt' and 'APPDATA' in os.environ:
+        rcfile = os.path.expandvars(r'$APPDATA\_lodgeitrc')
+    if rcfile:
+        try:
+            f = open(rcfile)
+            for line in f:
+                if line.strip()[:1] in '#;':
+                    continue
+                p = line.split('=', 1)
+                if len(p) == 2:
+                    key = p[0].strip().lower()
+                    if key in settings:
+                        if key in ('clipboard', 'open_browser'):
+                            settings[key] = p[1].strip().lower() in \
+                                            ('true', '1', 'on', 'yes')
+                        else:
+                            settings[key] = p[1].strip()
+            f.close()
+        except IOError:
+            pass
+    settings['tags'] = []
+    settings['title'] = None
+    return settings
+
+
+def make_utf8(text, encoding):
+    """Convert a text to UTF-8, brute-force."""
+    try:
+        u = unicode(text, 'utf-8')
+        uenc = 'utf-8'
+    except UnicodeError:
+        try:
+            u = unicode(text, encoding)
+            uenc = 'utf-8'
+        except UnicodeError:
+            u = unicode(text, 'iso-8859-15', 'ignore')
+            uenc = 'iso-8859-15'
+    try:
+        import chardet
+    except ImportError:
+        return u.encode('utf-8')
+    d = chardet.detect(text)
+    if d['encoding'] == uenc:
+        return u.encode('utf-8')
+    return unicode(text, d['encoding'], 'ignore').encode('utf-8')
+
+
+def get_xmlrpc_service():
+    """Create the XMLRPC server proxy and cache it."""
+    global _xmlrpc_service
+    import xmlrpclib
+    if _xmlrpc_service is None:
+        try:
+            _xmlrpc_service = xmlrpclib.ServerProxy(SERVICE_URL + 'xmlrpc/',
+                                                    allow_none=True)
+        except Exception, err:
+            fail('Could not connect to Pastebin: %s' % err, -1)
+    return _xmlrpc_service
+
+
+def copy_url(url):
+    """Copy the url into the clipboard."""
+    # try windows first
+    try:
+        import win32clipboard
+    except ImportError:
+        # then give pbcopy a try.  do that before gtk because
+        # gtk might be installed on os x but nobody is interested
+        # in the X11 clipboard there.
+        from subprocess import Popen, PIPE
+        try:
+            client = Popen(['pbcopy'], stdin=PIPE)
+        except OSError:
+            try:
+                import pygtk
+                pygtk.require('2.0')
+                import gtk
+                import gobject
+            except ImportError:
+                return
+            gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url)
+            gobject.idle_add(gtk.main_quit)
+            gtk.main()
+        else:
+            client.stdin.write(url)
+            client.stdin.close()
+            client.wait()
+    else:
+        win32clipboard.OpenClipboard()
+        win32clipboard.EmptyClipboard()
+        win32clipboard.SetClipboardText(url)
+        win32clipboard.CloseClipboard()
+
+
+def open_webbrowser(url):
+    """Open a new browser window."""
+    import webbrowser
+    webbrowser.open(url)
+
+
+def language_exists(language):
+    """Check if a language alias exists."""
+    xmlrpc = get_xmlrpc_service()
+    langs = xmlrpc.pastes.getLanguages()
+    return language in langs
+
+
+def get_mimetype(data, filename):
+    """Try to get MIME type from data."""
+    try:
+        import gnomevfs
+    except ImportError:
+        from mimetypes import guess_type
+        if filename:
+            return guess_type(filename)[0]
+    else:
+        if filename:
+            return gnomevfs.get_mime_type(os.path.abspath(filename))
+        return gnomevfs.get_mime_type_for_data(data)
+
+
+def print_languages():
+    """Print a list of all supported languages, with description."""
+    xmlrpc = get_xmlrpc_service()
+    languages = xmlrpc.pastes.getLanguages().items()
+    languages.sort(lambda a, b: cmp(a[1].lower(), b[1].lower()))
+    print 'Supported Languages:'
+    for alias, name in languages:
+        print '    %-30s%s' % (alias, name)
+
+
+def download_paste(uid):
+    """Download a paste given by ID."""
+    xmlrpc = get_xmlrpc_service()
+    paste = xmlrpc.pastes.getPaste(uid)
+    if not paste:
+        fail('Paste "%s" does not exist.' % uid, 5)
+    print paste['code'].encode('utf-8')
+
+
+def create_paste(code, language, filename, mimetype, private):
+    """Create a new paste."""
+    xmlrpc = get_xmlrpc_service()
+    rv = xmlrpc.pastes.newPaste(language, code, None, filename, mimetype,
+                                private)
+    if not rv:
+        fail('Could not create paste. Something went wrong '
+             'on the server side.', 4)
+    return rv
+
+
+def compile_paste(filenames, langopt):
+    """Create a single paste out of zero, one or multiple files."""
+    def read_file(f):
+        try:
+            return f.read()
+        finally:
+            f.close()
+    mime = ''
+    lang = langopt or ''
+    if not filenames:
+        data = read_file(sys.stdin)
+        if not langopt:
+            mime = get_mimetype(data, '') or ''
+        fname = ""
+    elif len(filenames) == 1:
+        fname = filenames[0]
+        data = read_file(open(filenames[0], 'rb'))
+        if not langopt:
+            mime = get_mimetype(data, filenames[0]) or ''
+    else:
+        result = []
+        for fname in filenames:
+            data = read_file(open(fname, 'rb'))
+            if langopt:
+                result.append('### %s [%s]\n\n' % (fname, langopt))
+            else:
+                result.append('### %s\n\n' % fname)
+            result.append(data)
+            result.append('\n\n')
+        data = ''.join(result)
+        lang = 'multi'
+    return data, lang, fname, mime
+
+
+def main( filename, languages=False, language=None, encoding='utf-8', 
+          open_browser=False, private=False, clipboard=False, 
+          download=None ):
+    """Paste a given script into a pastebin using the Lodgeit tool."""
+
+#    usage = ('Usage: %%prog [options] [FILE ...]\n\n'
+#             'Read the files and paste their contents to %s.\n'
+#             'If no file is given, read from standard input.\n'
+#             'If multiple files are given, they are put into a single paste.'
+#             % SERVICE_URL)
+#    parser = OptionParser(usage=usage)
+#
+#    settings = load_default_settings()
+#
+#    parser.add_option('-v', '--version', action='store_true',
+#                      help='Print script version')
+#    parser.add_option('-L', '--languages', action='store_true', default=False,
+#                      help='Retrieve a list of supported languages')
+#    parser.add_option('-l', '--language', default=settings['language'],
+#                      help='Used syntax highlighter for the file')
+#    parser.add_option('-e', '--encoding', default=settings['encoding'],
+#                      help='Specify the encoding of a file (default is '
+#                           'utf-8 or guessing if available)')
+#    parser.add_option('-b', '--open-browser', dest='open_browser',
+#                      action='store_true',
+#                      default=settings['open_browser'],
+#                      help='Open the paste in a web browser')
+#    parser.add_option('-p', '--private', action='store_true', default=False,
+#                      help='Paste as private')
+#    parser.add_option('--no-clipboard', dest='clipboard',
+#                      action='store_false',
+#                      default=settings['clipboard'],
+#                      help="Don't copy the url into the clipboard")
+#    parser.add_option('--download', metavar='UID',
+#                      help='Download a given paste')
+#
+#    opts, args = parser.parse_args()
+#
+    if languages:
+        print_languages()
+        return
+    elif download:
+        download_paste(download)
+        return
+
+    # check language if given
+    if language and not language_exists(language):
+        print 'Language %s is not supported.' % language
+        return
+
+    # load file(s)
+    args = [ filename ]
+    try:
+        data, language, filename, mimetype = compile_paste(args, language)
+    except Exception, err:
+        fail('Error while reading the file(s): %s' % err, 2)
+    if not data:
+        fail('Aborted, no content to paste.', 4)
+
+    # create paste
+    code = make_utf8(data, encoding)
+    pid = create_paste(code, language, filename, mimetype, private)
+    url = '%sshow/%s/' % (SERVICE_URL, pid)
+    print url
+    if open_browser:
+        open_webbrowser(url)
+    if clipboard:
+        copy_url(url)
+
+
+if __name__ == '__main__':
+    sys.exit(main())


--- a/yt/visualization/volume_rendering/grid_partitioner.py	Wed Jun 08 11:54:22 2011 -0400
+++ b/yt/visualization/volume_rendering/grid_partitioner.py	Wed Jun 08 11:55:03 2011 -0400
@@ -403,7 +403,7 @@
 
 def import_partitioned_grids(fn, int_type=na.int64, float_type=na.float64):
     f = h5py.File(fn, "r")
-    n_groups = len(f.listnames())
+    n_groups = len(f)
     grid_list = []
     dims = f["/PGrids/Dims"][:].astype(int_type)
     left_edges = f["/PGrids/LeftEdges"][:].astype(float_type)

Repository URL: https://bitbucket.org/yt_analysis/yt/
