[yt-svn] commit/yt: 8 new changesets

Bitbucket commits-noreply at bitbucket.org
Tue Mar 13 12:28:37 PDT 2012


8 new commits in yt:


https://bitbucket.org/yt_analysis/yt/changeset/f0124525728d/
changeset:   f0124525728d
branch:      yt
user:        brittonsmith
date:        2012-03-02 20:51:14
summary:     First pass at adding variance to 1D profiles.
affected #:  2 files

diff -r 052fac82670180c2de69fb2173dd9503e2b4cd1c -r f0124525728dc966890e13e9c43ba52c78d63175 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -31,6 +31,7 @@
 from yt.funcs import *
 
 from yt.data_objects.data_containers import YTFieldData
+from yt.utilities.amr_utils import bin_profile1d
 from yt.utilities.data_point_utilities import \
     Bin1DProfile, Bin2DProfile, Bin3DProfile
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -88,9 +89,11 @@
         self._ngrids = 0
         self.__data = {}         # final results will go here
         self.__weight_data = {}  # we need to track the weights as we go
+        self.__std_data = {}
         for field in fields:
             self.__data[field] = self._get_empty_field()
             self.__weight_data[field] = self._get_empty_field()
+            self.__std_data = self._get_empty_field()
         self.__used = self._get_empty_field().astype('bool')
         #pbar = get_pbar('Binning grids', len(self._data_source._grids))
         for gi,grid in enumerate(self._get_grids(fields)):
@@ -103,10 +106,12 @@
                 continue
             for field in fields:
                 # We get back field values, weight values, used bins
-                f, w, u = self._bin_field(grid, field, weight, accumulation,
+                f, w, q, u = self._bin_field(grid, field, weight, accumulation,
                                           args=args, check_cut=True)
                 self.__data[field] += f        # running total
                 self.__weight_data[field] += w # running total
+                self.__std_data[field] += w * (q/w + \
+                    (f/w - self.__data[field]/self.__weight_field[field])**2) # running total
                 self.__used = (self.__used | u)       # running 'or'
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
@@ -115,24 +120,37 @@
         for field in fields:
             if weight: # Now, at the end, we divide out.
                 self.__data[field][ub] /= self.__weight_data[field][ub]
+                self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
+            self["%s_std" % field] = na.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
-        del self.__data, self.__weight_data, self.__used
+        del self.__data, self.__std_data, self.__weight_data, self.__used
 
     def _finalize_parallel(self):
+        my_mean = {}
+        my_weight = {}
+        for key in self.__data:
+            my_mean[key] = self.__data[key] / self.__weight_field[key]
+            my_weight[key] = self.__weight_data[key]
         for key in self.__data:
             self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
             self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
+        for key in self.__std_data:
+            self.__std_data[key] = my_weight[key] * (self.__std_data[key] / my_weight[key] + \
+                                                     (my_mean[key] - self.__data[key])**2)
+            self.__std_data[key] = self.comm.mpi_allreduce(self.__std_data[key], op='sum')
         self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 
     def _unlazy_add_fields(self, fields, weight, accumulation):
         for field in fields:
-            f, w, u = self._bin_field(self._data_source, field, weight,
-                                      accumulation, self._args, check_cut = False)
+            f, w, q, u = self._bin_field(self._data_source, field, weight,
+                                         accumulation, self._args, check_cut = False)
             if weight:
                 f[u] /= w[u]
+                q[u] = na.sqrt(q[u] / w[u])
             self[field] = f
+            self["%s_std" % field] = q
         self["UsedBins"] = u
 
     def add_fields(self, fields, weight = "CellMassMsun", accumulation = False, fractional=False):
@@ -246,20 +264,27 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin1DProfile(bin_indices_x, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        # Bin1DProfile(bin_indices_x, weight_data, source_data,
+        #              weight_field, binned_field,
+        #              m_field, q_field, used_field)
+        bin_profile1d(bin_indices_x, weight_data, source_data,
+                      weight_field, binned_field,
+                      m_field, q_field, used_field)
         # Fix for laziness, because at the *end* we will be
         # summing up all of the histograms and dividing by the
         # weights.  Accumulation likely doesn't work with weighted
         # average fields.
         if accumulation: 
             binned_field = na.add.accumulate(binned_field)
-        return binned_field, weight_field, used_field.astype("bool")
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):


diff -r 052fac82670180c2de69fb2173dd9503e2b4cd1c -r f0124525728dc966890e13e9c43ba52c78d63175 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -31,6 +31,28 @@
     # NOTE that size_t might not be int
     void *alloca(int)
 
+def bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
+		  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=1] wresult,
+	          np.ndarray[np.float64_t, ndim=1] bresult,
+		  np.ndarray[np.float64_t, ndim=1] mresult,
+		  np.ndarray[np.float64_t, ndim=1] qresult,
+	          np.ndarray[np.float64_t, ndim=1] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bin = bins_x[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bin] += (wresult[bin] * (wval*bval - mresult[bin])**2) / \
+            (wresult[bin] + wval)
+        wresult[bin] += wval
+        bresult[bin] += wval*bval
+        mresult[bin] += (wval*bval - mresult[bin]) / wresult[bin]
+        used[bin] = 1
+
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)



https://bitbucket.org/yt_analysis/yt/changeset/4a04124ff73a/
changeset:   4a04124ff73a
branch:      yt
user:        brittonsmith
date:        2012-03-02 20:52:04
summary:     Merged.
affected #:  1 file

diff -r f0124525728dc966890e13e9c43ba52c78d63175 -r 4a04124ff73a1c2b6b83249f15951e21472e59b5 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -93,6 +93,22 @@
 except ImportError:
     pass
 
+def __memory_fallback(pid):
+    """
+    Get process memory from a system call.
+    """
+    value = os.popen('ps -o rss= -p %d' % pid).read().strip().split('\n')
+    if len(value) == 1: return float(value[0])
+    value.pop(0)
+    for line in value:
+        online = line.split()
+        if online[0] != pid: continue
+        try:
+            return float(online[2])
+        except:
+            return 0.0
+    return 0.0
+
 def get_memory_usage():
     """
     Returning resident size in megabytes
@@ -101,10 +117,10 @@
     try:
         pagesize = resource.getpagesize()
     except NameError:
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     status_file = "/proc/%s/statm" % (pid)
     if not os.path.isfile(status_file):
-        return float(os.popen('ps -o rss= -p %d' % pid).read()) / 1024
+        return __memory_fallback(pid) / 1024
     line = open(status_file).read()
     size, resident, share, text, library, data, dt = [int(i) for i in line.split()]
     return resident * pagesize / (1024 * 1024) # return in megs



https://bitbucket.org/yt_analysis/yt/changeset/6fa31a4f9301/
changeset:   6fa31a4f9301
branch:      yt
user:        brittonsmith
date:        2012-03-02 22:01:43
summary:     Fixing bug in single pass variance calculation.
affected #:  1 file

diff -r 4a04124ff73a1c2b6b83249f15951e21472e59b5 -r 6fa31a4f9301408769ce00a221379ece0595607b yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -46,11 +46,11 @@
         bin = bins_x[n]
         bval = bsource[n]
         wval = wsource[n]
-        qresult[bin] += (wresult[bin] * (wval*bval - mresult[bin])**2) / \
+        qresult[bin] += (wresult[bin] * wval * (bval - mresult[bin])**2) / \
             (wresult[bin] + wval)
         wresult[bin] += wval
         bresult[bin] += wval*bval
-        mresult[bin] += (wval*bval - mresult[bin]) / wresult[bin]
+        mresult[bin] += wval * (bval - mresult[bin]) / wresult[bin]
         used[bin] = 1
 
 @cython.boundscheck(False)



https://bitbucket.org/yt_analysis/yt/changeset/310228a7ac89/
changeset:   310228a7ac89
branch:      yt
user:        brittonsmith
date:        2012-03-02 22:54:16
summary:     Fixing single-pass variance calculation for running in parallel.
affected #:  1 file

diff -r 6fa31a4f9301408769ce00a221379ece0595607b -r 310228a7ac89079959c00343635cb9a41f5f3063 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -93,7 +93,7 @@
         for field in fields:
             self.__data[field] = self._get_empty_field()
             self.__weight_data[field] = self._get_empty_field()
-            self.__std_data = self._get_empty_field()
+            self.__std_data[field] = self._get_empty_field()
         self.__used = self._get_empty_field().astype('bool')
         #pbar = get_pbar('Binning grids', len(self._data_source._grids))
         for gi,grid in enumerate(self._get_grids(fields)):
@@ -110,8 +110,9 @@
                                           args=args, check_cut=True)
                 self.__data[field] += f        # running total
                 self.__weight_data[field] += w # running total
-                self.__std_data[field] += w * (q/w + \
-                    (f/w - self.__data[field]/self.__weight_field[field])**2) # running total
+                self.__std_data[field][u] += w[u] * (q[u]/w[u] + \
+                    (f[u]/w[u] -
+                     self.__data[field][u]/self.__weight_data[field][u])**2) # running total
                 self.__used = (self.__used | u)       # running 'or'
             grid.clear_data()
         # When the loop completes the parallel finalizer gets called
@@ -130,15 +131,19 @@
         my_mean = {}
         my_weight = {}
         for key in self.__data:
-            my_mean[key] = self.__data[key] / self.__weight_field[key]
-            my_weight[key] = self.__weight_data[key]
+            my_mean[key] = self._get_empty_field()
+            my_weight[key] = self._get_empty_field()
+        ub = na.where(self.__used)
+        for key in self.__data:
+            my_mean[key][ub] = self.__data[key][ub] / self.__weight_data[key][ub]
+            my_weight[key][ub] = self.__weight_data[key][ub]
         for key in self.__data:
             self.__data[key] = self.comm.mpi_allreduce(self.__data[key], op='sum')
         for key in self.__weight_data:
             self.__weight_data[key] = self.comm.mpi_allreduce(self.__weight_data[key], op='sum')
         for key in self.__std_data:
-            self.__std_data[key] = my_weight[key] * (self.__std_data[key] / my_weight[key] + \
-                                                     (my_mean[key] - self.__data[key])**2)
+            self.__std_data[key][ub] = my_weight[key][ub] * (self.__std_data[key][ub] / my_weight[key][ub] + \
+                (my_mean[key][ub] - self.__data[key][ub]/self.__weight_data[key][ub])**2)
             self.__std_data[key] = self.comm.mpi_allreduce(self.__std_data[key], op='sum')
         self.__used = self.comm.mpi_allreduce(self.__used, op='sum')
 



https://bitbucket.org/yt_analysis/yt/changeset/0cb6471b405f/
changeset:   0cb6471b405f
branch:      yt
user:        brittonsmith
date:        2012-03-13 16:07:04
summary:     Merged.
affected #:  15 files

diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -21,6 +21,9 @@
                                 JC Passy (jcpassy at gmail.com)
                                 Eve Lee (elee at cita.utoronto.ca)
                                 Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                                Kacper Kowalik (xarthisius.kk at gmail.com)
+                                Nathan Goldbaum (goldbaum at ucolick.org)
+                                Anna Rosen (rosen at ucolick.org)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -395,6 +395,7 @@
         cd zlib-1.2.3
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -412,6 +413,7 @@
         cd libpng-1.2.43
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -429,6 +431,7 @@
         cd freetype-2.4.4
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -446,6 +449,7 @@
         cd hdf5-1.8.7
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -464,6 +468,7 @@
         cd sqlite-autoconf-3070500
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
@@ -479,6 +484,7 @@
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ln -sf ${DEST_DIR}/bin/python2.7 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+    ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 fi


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 tests/projections.py
--- a/tests/projections.py
+++ b/tests/projections.py
@@ -28,8 +28,10 @@
                 field=field, axis=axis, weight_field="Density")
 
 for field in field_list:
-    create_test(TestGasDistribution, "density_%s" % field,
-                field_x="Density", field_y=field)
-    create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
-                field_x="Density", field_y="x-velocity", field_z=field,
-                weight="CellMassMsun")
+    if field != "Density":
+        create_test(TestGasDistribution, "density_%s" % field,
+                    field_x="Density", field_y=field)
+    if field not in ("x-velocity", "Density"):
+        create_test(Test2DGasDistribution, "density_x-vel_%s" % field,
+                    field_x="Density", field_y="x-velocity", field_z=field,
+                    weight="CellMassMsun")


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -105,11 +105,19 @@
         --------
         >>> com = halos[0].center_of_mass()
         """
-        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
+        if self.CoM is not None:
+            return self.CoM
         pm = self["ParticleMassMsun"]
-        cx = (self["particle_position_x"] - c_vec[0])
-        cy = (self["particle_position_y"] - c_vec[1])
-        cz = (self["particle_position_z"] - c_vec[2])
+        cx = self["particle_position_x"]
+        cy = self["particle_position_y"]
+        cz = self["particle_position_z"]
+        if isinstance(self, FOFHalo):
+            c_vec = na.array([cx[0], cy[0], cz[0]]) - self.pf.domain_center
+        else:
+            c_vec = self.maximum_density_location() - self.pf.domain_center
+        cx = (cx - c_vec[0])
+        cy = (cy - c_vec[1])
+        cz = (cz - c_vec[2])
         com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
         return (com * pm).sum(axis=1) / pm.sum() + c_vec
 
@@ -123,6 +131,8 @@
         --------
         >>> max_dens = halos[0].maximum_density()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[0]
         return self._max_dens[self.id][0]
 
     def maximum_density_location(self):
@@ -135,6 +145,8 @@
         --------
         >>> max_dens_loc = halos[0].maximum_density_location()
         """
+        if self.max_dens_point is not None:
+            return self.max_dens_point[1:]
         return na.array([
                 self._max_dens[self.id][1],
                 self._max_dens[self.id][2],
@@ -150,6 +162,8 @@
         --------
         >>> halos[0].total_mass()
         """
+        if self.group_total_mass is not None:
+            return self.group_total_mass
         return self["ParticleMassMsun"].sum()
 
     def bulk_velocity(self):
@@ -162,6 +176,8 @@
         --------
         >>> bv = halos[0].bulk_velocity()
         """
+        if self.bulk_vel is not None:
+            return self.bulk_vel
         pm = self["ParticleMassMsun"]
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
@@ -180,6 +196,8 @@
         --------
         >>> rms_vel = halos[0].rms_velocity()
         """
+        if self.rms_vel is not None:
+            return self.rms_vel
         bv = self.bulk_velocity()
         pm = self["ParticleMassMsun"]
         sm = pm.sum()
@@ -211,6 +229,8 @@
         --------
         >>> radius = halos[0].maximum_radius()
         """
+        if self.max_radius is not None:
+            return self.max_radius
         if center_of_mass:
             center = self.center_of_mass()
         else:
@@ -266,6 +286,8 @@
         return sphere
 
     def get_size(self):
+        if self.size is not None:
+            return self.size
         return self.indices.size
 
     def write_particle_list(self, handle):
@@ -418,6 +440,7 @@
 
 
 class HOPHalo(Halo):
+    _name = "HOPHalo"
     pass
 
 
@@ -428,292 +451,6 @@
         "virial_info", "virial_bin", "virial_mass", "virial_radius",
         "rms_velocity"]
 
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[0]
-        max = self.comm.mpi_allreduce(self._max_dens[self.id][0], op='max')
-        return max
-
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        if self.max_dens_point is not None:
-            return self.max_dens_point[1:]
-        # If I own the maximum density, my location is globally correct.
-        max_dens = self.maximum_density()
-        if self._max_dens[self.id][0] == max_dens:
-            value = na.array([
-                self._max_dens[self.id][1],
-                self._max_dens[self.id][2],
-                self._max_dens[self.id][3]])
-        else:
-            value = na.array([0, 0, 0])
-        # This works, and isn't appropriate but for now will be fine...
-        value = self.comm.mpi_allreduce(value, op='sum')
-        return value
-
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        # If it's precomputed, we save time!
-        if self.CoM is not None:
-            return self.CoM
-        # This need to be called by all tasks, but not all will end up using
-        # it.
-        c_vec = self.maximum_density_location() - na.array([0.5, 0.5, 0.5])
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            cx = (self["particle_position_x"] - c_vec[0])
-            cy = (self["particle_position_y"] - c_vec[1])
-            cz = (self["particle_position_z"] - c_vec[2])
-            com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
-            my_mass = pm.sum()
-            my_com = ((com * pm).sum(axis=1) / my_mass + c_vec) * my_mass
-        else:
-            my_mass = 0.
-            my_com = na.array([0., 0., 0.])
-        global_mass = self.comm.mpi_allreduce(my_mass, op='sum')
-        global_com = self.comm.mpi_allreduce(my_com, op='sum')
-        return global_com / global_mass
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        if self.group_total_mass is not None:
-            return self.group_total_mass
-        if self.indices is not None:
-            my_mass = self["ParticleMassMsun"].sum()
-        else:
-            my_mass = 0.
-        global_mass = self.comm.mpi_allreduce(float(my_mass), op='sum')
-        return global_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        if self.bulk_vel is not None:
-            return self.bulk_vel
-        # Unf. this cannot be reasonably computed inside of parallelHOP because
-        # we don't pass velocities in.
-        if self.indices is not None:
-            pm = self["ParticleMassMsun"]
-            vx = (self["particle_velocity_x"] * pm).sum()
-            vy = (self["particle_velocity_y"] * pm).sum()
-            vz = (self["particle_velocity_z"] * pm).sum()
-            pm = pm.sum()
-        else:
-            pm = 0.
-            vx = 0.
-            vy = 0.
-            vz = 0.
-        bv = na.array([vx, vy, vz, pm])
-        global_bv = self.comm.mpi_allreduce(bv, op='sum')
-        return global_bv[:3] / global_bv[3]
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        if self.rms_vel is not None:
-            return self.rms_vel
-        bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
-        sm = pm.sum()
-        if self.indices is not None:
-            vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
-            vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
-            vz = (self["particle_velocity_z"] - bv[2]) * pm / sm
-            s = vx ** 2 + vy ** 2 + vz ** 2
-            s = na.sum(s)
-            size = vx.size
-            ss = na.array([s, float(size)])
-        else:
-            ss = na.array([0., 0.])
-        global_ss = self.comm.mpi_allreduce(ss, op='sum')
-        ms = global_ss[0] / global_ss[1]
-        return na.sqrt(ms) * global_ss[1]
-
-    def maximum_radius(self, center_of_mass=True):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when
-            calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        if self.max_radius is not None:
-            return self.max_radius
-        if center_of_mass:
-            center = self.center_of_mass()
-        else:
-            center = self.maximum_density_location()
-        DW = self.data.pf.domain_right_edge - self.data.pf.domain_left_edge
-        if self.indices is not None:
-            rx = na.abs(self["particle_position_x"] - center[0])
-            ry = na.abs(self["particle_position_y"] - center[1])
-            rz = na.abs(self["particle_position_z"] - center[2])
-            r = na.sqrt(na.minimum(rx, DW[0] - rx) ** 2.0
-                    + na.minimum(ry, DW[1] - ry) ** 2.0
-                    + na.minimum(rz, DW[2] - rz) ** 2.0)
-            my_max = r.max()
-
-        else:
-            my_max = 0.
-        return self.comm.mpi_allreduce(my_max, op='max')
-
-    def get_size(self):
-        if self.size is not None:
-            return self.size
-        if self.indices is not None:
-            my_size = self.indices.size
-        else:
-            my_size = 0
-        global_size = self.comm.mpi_allreduce(my_size, op='sum')
-        return global_size
-
-    def __getitem__(self, key):
-        if ytcfg.getboolean("yt", "inline") == False:
-            return self.data[key][self.indices]
-        else:
-            return self.data[key][self.indices]
-
-    def virial_mass(self, virial_overdensity=200., bins=300):
-        r"""Return the virial mass of the halo
-        in Msun, using only the particles
-        in the halo (no baryonic information used).
-
-        The virial mass is calculated, using the built in `Halo.virial_info`
-        functionality.  The mass is then returned.
-
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial mass. Default = 200.
-        bins : int
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        mass : float
-            The virial mass in solar masses of the particles in the halo.  -1
-            if not virialized.
-
-        Examples
-        --------
-        >>> vm = halos[0].virial_mass()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
-            bins=bins)
-        if vir_bin != -1:
-            return self.mass_bins[vir_bin]
-        else:
-            return -1
-
-    def virial_radius(self, virial_overdensity=200., bins=300):
-        r"""Return the virial radius of the halo in code units.
-
-        The virial radius of the halo is calculated, using only the particles
-        in the halo (no baryonic information used). Returns -1 if the halo is
-        not virialized.
-
-        Parameters
-        ----------
-        virial_overdensity : float
-            The overdensity threshold compared to the universal average when
-            calculating the virial radius. Default = 200.
-        bins : integer
-            The number of spherical bins used to calculate overdensities.
-            Default = 300.
-
-        Returns
-        -------
-        radius : float
-            The virial raius in code units of the particles in the halo.  -1
-            if not virialized.
-
-        Examples
-        --------
-        >>> vr = halos[0].virial_radius()
-        """
-        self.virial_info(bins=bins)
-        vir_bin = self.virial_bin(virial_overdensity=virial_overdensity,
-            bins=bins)
-        if vir_bin != -1:
-            return self.radial_bins[vir_bin]
-        else:
-            return -1
-
-    def virial_bin(self, virial_overdensity=200., bins=300):
-        r"""Returns the bin index of the virial radius of the halo. Generally,
-        it is better to call virial_radius instead, which calls this function
-        automatically.
-        """
-        self.virial_info(bins=bins)
-        over = (self.overdensity > virial_overdensity)
-        if (over == True).any():
-            vir_bin = max(na.arange(bins + 1)[over])
-            return vir_bin
-        else:
-            return -1
-
     def virial_info(self, bins=300):
         r"""Calculates the virial information for the halo. Generally, it is
         better to call virial_radius or virial_mass instead, which calls this
@@ -781,27 +518,6 @@
 
 class FOFHalo(Halo):
 
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        pm = self["ParticleMassMsun"]
-        cx = self["particle_position_x"]
-        cy = self["particle_position_y"]
-        cz = self["particle_position_z"]
-        c_vec = na.array([cx[0], cy[0], cz[0]]) - na.array([0.5, 0.5, 0.5])
-        cx = cx - c_vec[0]
-        cy = cy - c_vec[1]
-        cz = cz - c_vec[2]
-        com = na.array([v - na.floor(v) for v in [cx, cy, cz]])
-        com = (pm * com).sum(axis=1) / pm.sum() + c_vec
-        return com
-
     def maximum_density(self):
         r"""Not implemented."""
         return -1
@@ -905,100 +621,6 @@
             del f
         return field_data
 
-    def center_of_mass(self):
-        r"""Calculate and return the center of mass.
-
-        The center of mass of the halo is directly calculated and returned.
-
-        Examples
-        --------
-        >>> com = halos[0].center_of_mass()
-        """
-        return self.CoM
-
-    def maximum_density_location(self):
-        r"""Return the location HOP identified as maximally dense.
-
-        Return the location HOP identified as maximally dense.
-
-        Examples
-        --------
-        >>> max_dens_loc = halos[0].maximum_density_location()
-        """
-        return self.max_dens_point[1:]
-
-    def maximum_density(self):
-        r"""Return the HOP-identified maximum density.
-
-        Return the HOP-identified maximum density.
-
-        Examples
-        --------
-        >>> max_dens = halos[0].maximum_density()
-        """
-        return self.max_dens_point[0]
-
-    def total_mass(self):
-        r"""Returns the total mass in solar masses of the halo.
-
-        Returns the total mass in solar masses of just the particles in the
-        halo.
-
-        Examples
-        --------
-        >>> halos[0].total_mass()
-        """
-        return self.group_total_mass
-
-    def bulk_velocity(self):
-        r"""Returns the mass-weighted average velocity in cm/s.
-
-        This calculates and returns the mass-weighted average velocity of just
-        the particles in the halo in cm/s.
-
-        Examples
-        --------
-        >>> bv = halos[0].bulk_velocity()
-        """
-        return self.bulk_vel
-
-    def rms_velocity(self):
-        r"""Returns the mass-weighted RMS velocity for the halo
-        particles in cgs units.
-
-        Calculate and return the mass-weighted RMS velocity for just the
-        particles in the halo.  The bulk velocity of the halo is subtracted
-        before computation.
-
-        Examples
-        --------
-        >>> rms_vel = halos[0].rms_velocity()
-        """
-        return self.rms_vel
-
-    def maximum_radius(self):
-        r"""Returns the maximum radius in the halo for all particles,
-        either from the point of maximum density or from the
-        center of mass.
-
-        The maximum radius from the most dense point is calculated.  This
-        accounts for periodicity.
-
-        Parameters
-        ----------
-        center_of_mass : bool
-            True chooses the center of mass when
-            calculating the maximum radius.
-            False chooses from the maximum density location for HOP halos
-            (it has no effect for FOF halos).
-            Default = True.
-
-        Examples
-        --------
-        >>> radius = halos[0].maximum_radius()
-        """
-        return self.max_radius
-
     def get_sphere(self):
         r"""Returns a sphere source.
 


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -367,7 +367,10 @@
 
     #@time_execution
     def __fill_child_mask(self, child, mask, tofill):
-        rf = self.pf.refine_by
+        try:
+            rf = self.pf.refine_by[child.Level-1]
+        except TypeError:
+            rf = self.pf.refine_by
         gi, cgi = self.get_global_startindex(), child.get_global_startindex()
         startIndex = na.maximum(0, cgi / rf - gi)
         endIndex = na.minimum((cgi + child.ActiveDimensions) / rf - gi,


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -68,16 +68,32 @@
         self.Parent = []
         self.Children = []
         self.Level = level
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
         self.ActiveDimensions = stop - start + 1
 
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+        
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return na.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by[self.Level-1]).astype('int64').ravel()
+        return self.start_index
+
     def _setup_dx(self):
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id - self._id_offset
         if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
+            self.dds = self.Parent[0].dds / self.pf.refine_by[self.Level-1]
         else:
             LE, RE = self.hierarchy.grid_left_edge[id,:], \
                      self.hierarchy.grid_right_edge[id,:]
@@ -91,8 +107,8 @@
     grid = ChomboGrid
     
     def __init__(self,pf,data_style='chombo_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge # need these to determine absolute grid locations
-        self.domain_right_edge = pf.domain_right_edge # need these to determine absolute grid locations
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
         self.data_style = data_style
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
@@ -100,12 +116,11 @@
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.hierarchy = os.path.abspath(self.hierarchy_filename)
         self.directory = os.path.dirname(self.hierarchy_filename)
-        self._fhandle = h5py.File(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename, 'r')
 
         self.float_type = self._fhandle['/level_0']['data:datatype=0'].dtype.name
-        self._levels = [fn for fn in self._fhandle if fn != "Chombo_global"]
+        self._levels = self._fhandle.keys()[1:]
         AMRHierarchy.__init__(self,pf,data_style)
-
         self._fhandle.close()
 
     def _initialize_data_storage(self):
@@ -113,7 +128,7 @@
 
     def _detect_fields(self):
         ncomp = int(self._fhandle['/'].attrs['num_components'])
-        self.field_list = [c[1] for c in self._fhandle['/'].attrs.listitems()[-ncomp:]]
+        self.field_list = [c[1] for c in self._fhandle['/'].attrs.items()[-ncomp:]]
     
     def _setup_classes(self):
         dd = self._get_data_reader_dict()
@@ -130,8 +145,8 @@
         
         # this relies on the first Group in the H5 file being
         # 'Chombo_global'
-        levels = [fn for fn in f if fn != "Chombo_global"]
-        self.grids = []
+        levels = f.keys()[1:]
+        grids = []
         i = 0
         for lev in levels:
             level_number = int(re.match('level_(\d+)',lev).groups()[0])
@@ -140,17 +155,18 @@
             for level_id, box in enumerate(boxes):
                 si = na.array([box['lo_%s' % ax] for ax in 'ijk'])
                 ei = na.array([box['hi_%s' % ax] for ax in 'ijk'])
-                pg = self.grid(len(self.grids),self,level=level_number,
+                pg = self.grid(len(grids),self,level=level_number,
                                start = si, stop = ei)
-                self.grids.append(pg)
-                self.grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = dx*si.astype(self.float_type) + self.domain_left_edge
-                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1) + self.domain_left_edge
+                grids.append(pg)
+                grids[-1]._level_id = level_id
+                self.grid_left_edge[i] = dx*si.astype(self.float_type)
+                self.grid_right_edge[i] = dx*(ei.astype(self.float_type)+1)
                 self.grid_particle_count[i] = 0
                 self.grid_dimensions[i] = ei - si + 1
                 i += 1
         self.grids = na.empty(len(grids), dtype='object')
         for gi, g in enumerate(grids): self.grids[gi] = g
+#        self.grids = na.array(self.grids, dtype='object')
 
     def _populate_grid_objects(self):
         for g in self.grids:
@@ -179,8 +195,8 @@
     
     def __init__(self, filename, data_style='chombo_hdf5',
                  storage_filename = None, ini_filename = None):
-        # hardcoded for now 
-        self.current_time = 0.0
+        fileh = h5py.File(filename,'r')
+        self.current_time = fileh.attrs['time']
         self.ini_filename = ini_filename
         StaticOutput.__init__(self,filename,data_style)
         self.storage_filename = storage_filename
@@ -230,15 +246,18 @@
         """
         if os.path.isfile('pluto.ini'):
             self._parse_pluto_file('pluto.ini')
-        elif os.path.isfile('orion2.ini'):
-            self._parse_pluto_file('orion2.ini')
         else:
+            if os.path.isfile('orion2.ini'): self._parse_pluto_file('orion2.ini')
             self.unique_identifier = \
-                                   int(os.stat(self.parameter_filename)[ST_CTIME])
-            self.domain_left_edge = na.array([0.,0.,0.])
+                int(os.stat(self.parameter_filename)[ST_CTIME])
+            self.domain_left_edge = self.__calc_left_edge()
             self.domain_right_edge = self.__calc_right_edge()
+            self.domain_dimensions = self.__calc_domain_dimensions()
             self.dimensionality = 3
-            self.refine_by = 2
+            self.refine_by = []
+            fileh = h5py.File(self.parameter_filename,'r')
+            for level in range(0,fileh.attrs['num_levels']):
+                self.refine_by.append(fileh['/level_'+str(level)].attrs['ref_ratio'])
 
     def _parse_pluto_file(self, ini_filename):
         """
@@ -268,36 +287,26 @@
                     else:
                         self.parameters[paramName] = t
 
-            # assumes 3D for now
-            elif param.startswith("X1-grid"):
-                t = vals.split()
-                low1 = float(t[1])
-                high1 = float(t[4])
-                N1 = int(t[2])
-            elif param.startswith("X2-grid"):
-                t = vals.split()
-                low2 = float(t[1])
-                high2 = float(t[4])
-                N2 = int(t[2])
-            elif param.startswith("X3-grid"):
-                t = vals.split()
-                low3 = float(t[1])
-                high3 = float(t[4])
-                N3 = int(t[2])
+    def __calc_left_edge(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        dx0 = fileh['/level_0'].attrs['dx']
+        LE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        fileh.close()
+        return LE
 
-        self.dimensionality = 3
-        self.domain_left_edge = na.array([low1,low2,low3])
-        self.domain_right_edge = na.array([high1,high2,high3])
-        self.domain_dimensions = na.array([N1,N2,N3])
-        self.refine_by = self.parameters["RefineBy"]
-            
     def __calc_right_edge(self):
         fileh = h5py.File(self.parameter_filename,'r')
         dx0 = fileh['/level_0'].attrs['dx']
-        RE = dx0*((na.array(fileh['/level_0'].attrs['prob_domain']))[3:] + 1)
+        RE = dx0*((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
         fileh.close()
         return RE
-                   
+                  
+    def __calc_domain_dimensions(self):
+        fileh = h5py.File(self.parameter_filename,'r')
+        L_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:3])
+        R_index = ((na.array(list(fileh['/level_0'].attrs['prob_domain'])))[3:] + 1)
+        return R_index - L_index
+ 
     @classmethod
     def _is_valid(self, *args, **kwargs):
         try:
@@ -309,7 +318,6 @@
             pass
         return False
 
-
     @parallel_root_only
     def print_key_parameters(self):
         for a in ["current_time", "domain_dimensions", "domain_left_edge",


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -38,45 +38,64 @@
 add_chombo_field = KnownChomboFields.add_field
 
 ChomboFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_chombo_field = ChomboFieldInfo.add_field
+add_field = ChomboFieldInfo.add_field
 
-add_field = add_chombo_field
+add_chombo_field("density", function=NullFunc, take_log=True,
+                 validators = [ValidateDataField("density")],
+                 units=r"\rm{g}/\rm{cm}^3")
 
-add_field("density", function=NullFunc, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
+KnownChomboFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-ChomboFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_chombo_field("X-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Momentum")],
+                 units=r"",display_name=r"M_x")
+KnownChomboFields["X-momentum"]._projected_units=r""
 
-add_field("X-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Momentum")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-momentum"]._projected_units=r""
+add_chombo_field("Y-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Momentum")],
+                 units=r"",display_name=r"M_y")
+KnownChomboFields["Y-momentum"]._projected_units=r""
 
-add_field("Y-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Momentum")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-momentum"]._projected_units=r""
+add_chombo_field("Z-momentum", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Z-Momentum")],
+                 units=r"",display_name=r"M_z")
+KnownChomboFields["Z-momentum"]._projected_units=r""
 
-add_field("Z-momentum", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Momentum")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-momentum"]._projected_units=r""
+add_chombo_field("X-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("X-Magnfield")],
+                 units=r"",display_name=r"B_x")
+KnownChomboFields["X-magnfield"]._projected_units=r""
 
-add_field("X-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("X-Magnfield")],
-          units=r"",display_name=r"B_x")
-ChomboFieldInfo["X-magnfield"]._projected_units=r""
+add_chombo_field("Y-magnfield", function=NullFunc, take_log=False,
+                 validators = [ValidateDataField("Y-Magnfield")],
+                 units=r"",display_name=r"B_y")
+KnownChomboFields["Y-magnfield"]._projected_units=r""
 
-add_field("Y-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Y-Magnfield")],
-          units=r"",display_name=r"B_y")
-ChomboFieldInfo["Y-magnfield"]._projected_units=r""
+add_chombo_field("Z-magnfield", function=NullFunc, take_log=False,
+                  validators = [ValidateDataField("Z-Magnfield")],
+                  units=r"",display_name=r"B_z")
+KnownChomboFields["Z-magnfield"]._projected_units=r""
 
-add_field("Z-magnfield", function=NullFunc, take_log=False,
-          validators = [ValidateDataField("Z-Magnfield")],
-          units=r"",display_name=r"B_z")
-ChomboFieldInfo["Z-magnfield"]._projected_units=r""
+add_chombo_field("energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["energy-density"]._projected_units =r""
+
+add_chombo_field("radiation-energy-density", function=lambda a,b: None, take_log=True,
+                 validators = [ValidateDataField("radiation-energy-density")],
+                 units=r"\rm{erg}/\rm{cm}^3")
+KnownChomboFields["radiation-energy-density"]._projected_units =r""
+
+def _Density(field,data):
+    """A duplicate of the density field. This is needed because when you try 
+    to instantiate a PlotCollection without passing in a center, the code
+    will try to generate one for you using the "Density" field, which gives an error 
+    if it isn't defined.
+
+    """
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
 
 def _MagneticEnergy(field,data):
     return (data["X-magnfield"]**2 +


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -36,7 +36,7 @@
 
     def _field_dict(self,fhandle):
         ncomp = int(fhandle['/'].attrs['num_components'])
-        temp =  fhandle['/'].attrs.listitems()[-ncomp:]
+        temp =  fhandle['/'].attrs.items()[-ncomp:]
         val, keys = zip(*temp)
         val = [int(re.match('component_(\d+)',v).groups()[0]) for v in val]
         return dict(zip(keys,val))
@@ -45,7 +45,7 @@
         fhandle = h5py.File(grid.filename,'r')
         ncomp = int(fhandle['/'].attrs['num_components'])
 
-        fns = [c[1] for c in f['/'].attrs.listitems()[-ncomp:]]
+        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
         fhandle.close()
     
     def _read_data_set(self,grid,field):
@@ -64,7 +64,6 @@
 
         fhandle.close()
         return data.reshape(dims, order='F')
-                                          
 
     def _read_data_slice(self, grid, field, axis, coord):
         sl = [slice(None), slice(None), slice(None)]


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/frontends/orion/fields.py
--- a/yt/frontends/orion/fields.py
+++ b/yt/frontends/orion/fields.py
@@ -44,26 +44,26 @@
 OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
 add_field = OrionFieldInfo.add_field
 
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-OrionFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+add_orion_field("density", function=lambda a,b: None, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
 
-add_field("eden", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("eden")],
-          units=r"\rm{erg}/\rm{cm}^3")
+add_orion_field("eden", function=lambda a,b: None, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
 
-add_field("xmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("xmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("xmom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
-add_field("ymom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("ymom")],
-          units=r"\rm{gm}/\rm{cm^2\ s}")
+add_orion_field("ymom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
 
-add_field("zmom", function=lambda a,b: None, take_log=False,
-          validators = [ValidateDataField("zmom")],
-          units=r"\rm{g}/\rm{cm^2\ s}")
+add_orion_field("zmom", function=lambda a,b: None, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
 
 translation_dict = {"x-velocity": "xvel",
                     "y-velocity": "yvel",
@@ -88,11 +88,11 @@
 
 def _xVelocity(field, data):
     """generate x-velocity from x-momentum and density
-
+    
     """
     return data["xmom"]/data["density"]
-add_field("x-velocity",function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _yVelocity(field,data):
     """generate y-velocity from y-momentum and density
@@ -102,16 +102,16 @@
     #    return data["xvel"]
     #except KeyError:
     return data["ymom"]/data["density"]
-add_field("y-velocity",function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _zVelocity(field,data):
     """generate z-velocity from z-momentum and density
-
+    
     """
     return data["zmom"]/data["density"]
-add_field("z-velocity",function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
 
 def _ThermalEnergy(field, data):
     """generate thermal (gas energy). Dual Energy Formalism was
@@ -125,19 +125,19 @@
         data["x-velocity"]**2.0
         + data["y-velocity"]**2.0
         + data["z-velocity"]**2.0 )
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
+add_orion_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
 
 def _Pressure(field,data):
     """M{(Gamma-1.0)*e, where e is thermal energy density
        NB: this will need to be modified for radiation
     """
     return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+add_orion_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
 
 def _Temperature(field,data):
     return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
-add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+add_orion_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
 
 # particle fields
 
@@ -170,6 +170,6 @@
 
 for pf in _particle_field_list:
     pfunc = particle_func("particle_%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
+    add_orion_field("particle_%s" % pf, function=pfunc,
+                    validators = [ValidateSpatial(0)],
+                    particle_type=True)




diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/gui/reason/html/js/widget_plotwindow.js
--- a/yt/gui/reason/html/js/widget_plotwindow.js
+++ b/yt/gui/reason/html/js/widget_plotwindow.js
@@ -145,11 +145,13 @@
                         id: "img_" + this.id,
                         width: 400,
                         height: 400,
+                        draggable: false,
                     },
                     x: 100,
                     y: 10,
                     width: 400,
                     height: 400,
+                    draggable: false,
                     listeners: {
                         afterrender: function(c){
                             c.el.on('click', function(e){
@@ -163,6 +165,25 @@
                                 yt_rpc.ExtDirectREPL.execute(
                                 {code:cc, hide:true}, cell_finished); 
                             });
+                            c.el.on('mousedown', function(e){
+                                c.drag_start = true;
+                                c.drag_start_pos = e.getXY();
+                            });
+                            c.el.on('mouseup', function(e){
+                                c.drag_start = false;
+                                drag_stop = e.getXY();
+                                delta_x = drag_stop[0] - c.drag_start_pos[0];
+                                delta_y = drag_stop[1] - c.drag_start_pos[1];
+                                if (((delta_x < -10) || (delta_x > 10)) ||
+                                    ((delta_y < -10) || (delta_y > 10))) {
+                                    rel_x = -delta_x / 400;
+                                    rel_y = -delta_y / 400;
+                                    cc = python_varname + '.pan_rel((' + 
+                                        rel_x + ',' + rel_y + '))';
+                                    yt_rpc.ExtDirectREPL.execute(
+                                    {code:cc, hide:true}, cell_finished); 
+                                }
+                            });
                         }
                     }
                 }, {


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -250,7 +250,7 @@
         if n_unique > my_max:
             best_dim = dim
             my_max = n_unique
-            my_split = (n_unique-1)/2
+            my_split = (n_unique)/2
     # I recognize how lame this is.
     cdef np.ndarray[np.float64_t, ndim=1] tarr = np.empty(my_max, dtype='float64')
     for i in range(my_max):


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -154,6 +154,9 @@
     @wraps(func)
     def single_proc_results(self, *args, **kwargs):
         retval = None
+        if hasattr(self, "dont_wrap"):
+            if func.func_name in self.dont_wrap:
+                return func(self, *args, **kwargs)
         if self._processing or not self._distributed:
             return func(self, *args, **kwargs)
         comm = _get_comm((self,))


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/visualization/eps_writer.py
--- a/yt/visualization/eps_writer.py
+++ b/yt/visualization/eps_writer.py
@@ -251,7 +251,8 @@
 
 #=============================================================================
 
-    def axis_box_yt(self, plot, units=None, bare_axes=False, **kwargs):
+    def axis_box_yt(self, plot, units=None, bare_axes=False,
+                    tickcolor=None, **kwargs):
         r"""Wrapper around DualEPS.axis_box to automatically fill in the
         axis ranges and labels from a yt plot.
 
@@ -296,7 +297,8 @@
                 units = units.replace('mpc', 'Mpc')
                 _xlabel = '%s (%s)' % (x_names[plot.data.axis], units)
                 _ylabel = '%s (%s)' % (y_names[plot.data.axis], units)
-            _tickcolor = pyx.color.cmyk.white
+            if tickcolor == None:
+                _tickcolor = pyx.color.cmyk.white
         else:
             _xrange = plot._axes.get_xlim()
             _yrange = plot._axes.get_ylim()
@@ -308,7 +310,10 @@
             else:
                 _xlabel = plot._x_label
                 _ylabel = plot._y_label
-            _tickcolor = None
+            if tickcolor == None:
+                _tickcolor = None
+        if tickcolor != None:
+            _tickcolor = tickcolor
         self.axis_box(xrange=_xrange, yrange=_yrange, xlabel=_xlabel,
                       ylabel=_ylabel, tickcolor=_tickcolor, xlog=_xlog,
                       ylog=_ylog, bare_axes=bare_axes, **kwargs)
@@ -350,7 +355,7 @@
 
 #=============================================================================
 
-    def insert_image_yt(self, plot, pos=(0,0)):
+    def insert_image_yt(self, plot, pos=(0,0), scale=1.0):
         r"""Inserts a bitmap taken from a yt plot.
 
         Parameters
@@ -398,8 +403,8 @@
                                  figure_canvas.tostring_rgb())
         #figure_canvas.print_png('test.png')
         self.canvas.insert(pyx.bitmap.bitmap(pos[0], pos[1], image,
-                                             width=self.figsize[0],
-                                             height=self.figsize[1]))
+                                             width=scale*self.figsize[0],
+                                             height=scale*self.figsize[1]))
 
 #=============================================================================
 
@@ -871,44 +876,43 @@
                 if cb_flags != None:
                     if cb_flags[index] == False:
                         continue
-                if _yt or colorbars[index] != None:
-                    if ncol == 1:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif i == 0:
-                        orientation = "left"
-                        xpos = bbox[0]
-                        ypos = ypos0
-                    elif i+1 == ncol:
-                        orientation = "right"
-                        xpos = bbox[1]
-                        ypos = ypos0
-                    elif j == 0:
-                        orientation = "bottom"
-                        ypos = bbox[2]
-                        xpos = xpos0
-                    elif j+1 == nrow:
-                        orientation = "top"
-                        ypos = bbox[3]
-                        xpos = xpos0
+                if ncol == 1:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j == 0:
+                    orientation = "bottom"
+                    ypos = bbox[2]
+                    xpos = xpos0
+                elif i == 0:
+                    orientation = "left"
+                    xpos = bbox[0]
+                    ypos = ypos0
+                elif i+1 == ncol:
+                    orientation = "right"
+                    xpos = bbox[1]
+                    ypos = ypos0
+                elif j+1 == nrow:
+                    orientation = "top"
+                    ypos = bbox[3]
+                    xpos = xpos0
+                else:
+                    orientation = None  # Marker for interior plot
+
+                if orientation != None:
+                    if _yt:
+                        d.colorbar_yt(yt_plots[index],
+                                      pos=[xpos,ypos],
+                                      shrink=shrink_cb,
+                                      orientation=orientation)
                     else:
-                        orientation = None  # Marker for interior plot
-
-                    if orientation != None:
-                        if _yt:
-                            d.colorbar_yt(yt_plots[index],
-                                          pos=[xpos,ypos],
-                                          shrink=shrink_cb,
-                                          orientation=orientation)
-                        else:
-                            d.colorbar(colorbars[index]["cmap"],
-                                       zrange=colorbars[index]["range"],
-                                       label=colorbars[index]["name"],
-                                       log=colorbars[index]["log"],
-                                       orientation=orientation,
-                                       pos=[xpos,ypos],
-                                       shrink=shrink_cb)
+                        d.colorbar(colorbars[index]["cmap"],
+                                   zrange=colorbars[index]["range"],
+                                   label=colorbars[index]["name"],
+                                   log=colorbars[index]["log"],
+                                   orientation=orientation,
+                                   pos=[xpos,ypos],
+                                   shrink=shrink_cb)
 
     if savefig != None:
         d.save_fig(savefig, format=format)
@@ -958,7 +962,7 @@
 #=============================================================================
 
 def single_plot(plot, figsize=(12,12), cb_orient="right", bare_axes=False,
-                savefig=None, file_format='eps'):
+                savefig=None, colorbar=True, file_format='eps', **kwargs):
     r"""Wrapper for DualEPS routines to create a figure directy from a yt
     plot.  Calls insert_image_yt, axis_box_yt, and colorbar_yt.
 
@@ -975,6 +979,8 @@
         Set to true to have no annotations or tick marks on all of the axes.
     savefig : string
         Name of the saved file without the extension.
+    colorbar : boolean
+        Set to true to include a colorbar
     file_format : string
         Format type.  Can be "eps" or "pdf"
 
@@ -986,8 +992,9 @@
     """
     d = DualEPS(figsize=figsize)
     d.insert_image_yt(plot)
-    d.axis_box_yt(plot, bare_axes=bare_axes)
-    d.colorbar_yt(plot, orientation=cb_orient)
+    d.axis_box_yt(plot, bare_axes=bare_axes, **kwargs)
+    if colorbar:
+        d.colorbar_yt(plot, orientation=cb_orient)
     if savefig != None:
         d.save_fig(savefig, format=file_format)
     return d


diff -r 310228a7ac89079959c00343635cb9a41f5f3063 -r 0cb6471b405f1583c635b693832c53103607e478 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -426,7 +426,7 @@
         y = raw_data['py']
         z = raw_data[field]
         if logit: z = na.log10(z)
-        fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()
+        fvals = triang(x,y).nn_interpolator(z)(xi,yi).transpose()[::-1,:]
 
         fig = Figure((vi/100.0, vj/100.0), dpi = 100)
         fig.figimage(img)



https://bitbucket.org/yt_analysis/yt/changeset/d1806f01fa85/
changeset:   d1806f01fa85
branch:      yt
user:        brittonsmith
date:        2012-03-13 16:09:48
summary:     Removed some commented out lines.
affected #:  1 file

diff -r 0cb6471b405f1583c635b693832c53103607e478 -r d1806f01fa85906ba095319487e09e8f7dff2f60 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -276,9 +276,6 @@
         bin_indices_x = args[1].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        # Bin1DProfile(bin_indices_x, weight_data, source_data,
-        #              weight_field, binned_field,
-        #              m_field, q_field, used_field)
         bin_profile1d(bin_indices_x, weight_data, source_data,
                       weight_field, binned_field,
                       m_field, q_field, used_field)



https://bitbucket.org/yt_analysis/yt/changeset/4b64f344ee99/
changeset:   4b64f344ee99
branch:      yt
user:        MatthewTurk
date:        2012-03-13 19:37:09
summary:     Ported over 2D and 3D profiling routines to Cython.  Removed them from the old
.c file.
affected #:  3 files

diff -r d1806f01fa85906ba095319487e09e8f7dff2f60 -r 4b64f344ee99da93c1d9b2325d72056ea253b222 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -31,9 +31,7 @@
 from yt.funcs import *
 
 from yt.data_objects.data_containers import YTFieldData
-from yt.utilities.amr_utils import bin_profile1d
-from yt.utilities.data_point_utilities import \
-    Bin1DProfile, Bin2DProfile, Bin3DProfile
+from yt.utilities.amr_utils import bin_profile1d, bin_profile2d, bin_profile3d
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
 
@@ -123,7 +121,7 @@
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            #self["%s_std" % field] = na.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used
 
@@ -442,6 +440,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -450,8 +450,8 @@
         weight_data = weight_data[mi]
         nx = bin_indices_x.size
         #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+                      weight_field, binned_field, m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -459,7 +459,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=0)
             if accumulation[1]:
                 binned_field = na.add.accumulate(binned_field, axis=1)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):
@@ -694,6 +695,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1].ravel().astype('int64')
@@ -701,10 +704,9 @@
         bin_indices_z = args[3].ravel().astype('int64')
         source_data = source_data[mi]
         weight_data = weight_data[mi]
-        Bin3DProfile(
-            bin_indices_x, bin_indices_y, bin_indices_z,
-            weight_data, source_data,
-            weight_field, binned_field, used_field)
+        bin_profile3d(bin_indices_x, bin_indices_y, bin_indices_z,
+                      weight_data, source_data, weight_field, binned_field,
+                      m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
@@ -714,7 +716,8 @@
                 binned_field = na.add.accumulate(binned_field, axis=1)
             if accumulation[2]:
                 binned_field = na.add.accumulate(binned_field, axis=2)
-        return binned_field, weight_field, used_field.astype('bool')
+        return binned_field, weight_field, q_field, \
+            used_field.astype("bool")
 
     @preserve_source_parameters
     def _get_bins(self, source, check_cut=False):


diff -r d1806f01fa85906ba095319487e09e8f7dff2f60 -r 4b64f344ee99da93c1d9b2325d72056ea253b222 yt/utilities/_amr_utils/misc_utilities.pyx
--- a/yt/utilities/_amr_utils/misc_utilities.pyx
+++ b/yt/utilities/_amr_utils/misc_utilities.pyx
@@ -31,14 +31,17 @@
     # NOTE that size_t might not be int
     void *alloca(int)
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def bin_profile1d(np.ndarray[np.int64_t, ndim=1] bins_x,
-		  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
                   np.ndarray[np.float64_t, ndim=1] bsource,
                   np.ndarray[np.float64_t, ndim=1] wresult,
-	          np.ndarray[np.float64_t, ndim=1] bresult,
-		  np.ndarray[np.float64_t, ndim=1] mresult,
-		  np.ndarray[np.float64_t, ndim=1] qresult,
-	          np.ndarray[np.float64_t, ndim=1] used):
+                  np.ndarray[np.float64_t, ndim=1] bresult,
+                  np.ndarray[np.float64_t, ndim=1] mresult,
+                  np.ndarray[np.float64_t, ndim=1] qresult,
+                  np.ndarray[np.float64_t, ndim=1] used):
     cdef int n
     cdef np.int64_t bin
     cdef np.float64_t wval, bval
@@ -56,6 +59,62 @@
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
+def bin_profile2d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=2] wresult,
+                  np.ndarray[np.float64_t, ndim=2] bresult,
+                  np.ndarray[np.float64_t, ndim=2] mresult,
+                  np.ndarray[np.float64_t, ndim=2] qresult,
+                  np.ndarray[np.float64_t, ndim=2] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj] += (wresult[bini, binj] * wval * (bval - mresult[bini, binj])**2) / \
+            (wresult[bini, binj] + wval)
+        wresult[bini, binj] += wval
+        bresult[bini, binj] += wval*bval
+        mresult[bini, binj] += wval * (bval - mresult[bini, binj]) / wresult[bini, binj]
+        used[bini, binj] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def bin_profile3d(np.ndarray[np.int64_t, ndim=1] bins_x,
+                  np.ndarray[np.int64_t, ndim=1] bins_y,
+                  np.ndarray[np.int64_t, ndim=1] bins_z,
+                  np.ndarray[np.float64_t, ndim=1] wsource,
+                  np.ndarray[np.float64_t, ndim=1] bsource,
+                  np.ndarray[np.float64_t, ndim=3] wresult,
+                  np.ndarray[np.float64_t, ndim=3] bresult,
+                  np.ndarray[np.float64_t, ndim=3] mresult,
+                  np.ndarray[np.float64_t, ndim=3] qresult,
+                  np.ndarray[np.float64_t, ndim=3] used):
+    cdef int n
+    cdef np.int64_t bin
+    cdef np.float64_t wval, bval
+    for n in range(bins_x.shape[0]):
+        bini = bins_x[n]
+        binj = bins_y[n]
+        bink = bins_z[n]
+        bval = bsource[n]
+        wval = wsource[n]
+        qresult[bini, binj, bink] += (wresult[bini, binj, bink] * wval * (bval - mresult[bini, binj, bink])**2) / \
+            (wresult[bini, binj, bink] + wval)
+        wresult[bini, binj, bink] += wval
+        bresult[bini, binj, bink] += wval*bval
+        mresult[bini, binj, bink] += wval * (bval - mresult[bini, binj, bink]) / wresult[bini, binj, bink]
+        used[bini, binj, bink] = 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
 def get_color_bounds(np.ndarray[np.float64_t, ndim=1] px,
                      np.ndarray[np.float64_t, ndim=1] py,
                      np.ndarray[np.float64_t, ndim=1] pdx,


diff -r d1806f01fa85906ba095319487e09e8f7dff2f60 -r 4b64f344ee99da93c1d9b2325d72056ea253b222 yt/utilities/data_point_utilities.c
--- a/yt/utilities/data_point_utilities.c
+++ b/yt/utilities/data_point_utilities.c
@@ -273,359 +273,6 @@
 
 }
 
-static PyObject *_profile1DError;
-
-static PyObject *Py_Bin1DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOO",
-                &obins_x, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile1DError,
-                "Bin1DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1,1,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile1DError,
-             "Bin1DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR1(wresult, i) += wval;
-      *(npy_float64*)PyArray_GETPTR1(bresult, i) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR1(used, i) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile2DError;
-
-static PyObject *Py_Bin2DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j;
-    PyObject *obins_x, *obins_y, *owsource, *obsource, *owresult, *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *wsource, *bsource, *wresult, *bresult, *used;
-    bins_x = bins_y = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOO",
-                &obins_x, &obins_y, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile2DError,
-                "Bin2DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 2,2,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile2DError,
-             "Bin2DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR2(wresult, i, j) += wval;
-      *(npy_float64*)PyArray_GETPTR2(bresult, i, j) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR2(used, i, j) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
-static PyObject *_profile3DError;
-
-static PyObject *Py_Bin3DProfile(PyObject *obj, PyObject *args)
-{
-    int i, j, k;
-    PyObject *obins_x, *obins_y, *obins_z, *owsource, *obsource, *owresult,
-             *obresult, *oused;
-    PyArrayObject *bins_x, *bins_y, *bins_z, *wsource, *bsource, *wresult,
-                  *bresult, *used;
-    bins_x = bins_y = bins_z = wsource = bsource = wresult = bresult = used = NULL;
-
-    if (!PyArg_ParseTuple(args, "OOOOOOOO",
-                &obins_x, &obins_y, &obins_z, &owsource, &obsource,
-                &owresult, &obresult, &oused))
-        return PyErr_Format(_profile3DError,
-                "Bin3DProfile: Invalid parameters.");
-    i = 0;
-
-    bins_x = (PyArrayObject *) PyArray_FromAny(obins_x,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if(bins_x==NULL) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_x.");
-    goto _fail;
-    }
-    
-    bins_y = (PyArrayObject *) PyArray_FromAny(obins_y,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_y==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_y))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_y, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bins_z = (PyArrayObject *) PyArray_FromAny(obins_z,
-                    PyArray_DescrFromType(NPY_INT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bins_z==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bins_z))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bins_z, same size as bins_x.");
-    goto _fail;
-    }
-    
-    wsource = (PyArrayObject *) PyArray_FromAny(owsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((wsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(wsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for wsource, same size as bins_x.");
-    goto _fail;
-    }
-    
-    bsource = (PyArrayObject *) PyArray_FromAny(obsource,
-                    PyArray_DescrFromType(NPY_FLOAT64), 1, 1,
-                    NPY_IN_ARRAY, NULL);
-    if((bsource==NULL) || (PyArray_SIZE(bins_x) != PyArray_SIZE(bsource))) {
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: One dimension required for bsource, same size as bins_x.");
-    goto _fail;
-    }
-
-    wresult = (PyArrayObject *) PyArray_FromAny(owresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if(wresult==NULL){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for wresult.");
-    goto _fail;
-    }
-
-    bresult = (PyArrayObject *) PyArray_FromAny(obresult,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((bresult==NULL) ||(PyArray_SIZE(wresult) != PyArray_SIZE(bresult))
-       || (PyArray_DIM(bresult,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for bresult, same shape as wresult.");
-    goto _fail;
-    }
-    
-    used = (PyArrayObject *) PyArray_FromAny(oused,
-                    PyArray_DescrFromType(NPY_FLOAT64), 3,3,
-                    NPY_INOUT_ARRAY | NPY_UPDATEIFCOPY, NULL);
-    if((used==NULL) ||(PyArray_SIZE(used) != PyArray_SIZE(wresult))
-       || (PyArray_DIM(used,0) != PyArray_DIM(wresult,0))){
-    PyErr_Format(_profile3DError,
-             "Bin3DProfile: Two dimensions required for used, same shape as wresult.");
-    goto _fail;
-    }
-
-    npy_float64 wval, bval;
-    int n;
-
-    for(n=0; n<bins_x->dimensions[0]; n++) {
-      i = *(npy_int64*)PyArray_GETPTR1(bins_x, n);
-      j = *(npy_int64*)PyArray_GETPTR1(bins_y, n);
-      k = *(npy_int64*)PyArray_GETPTR1(bins_z, n);
-      bval = *(npy_float64*)PyArray_GETPTR1(bsource, n);
-      wval = *(npy_float64*)PyArray_GETPTR1(wsource, n);
-      *(npy_float64*)PyArray_GETPTR3(wresult, i, j, k) += wval;
-      *(npy_float64*)PyArray_GETPTR3(bresult, i, j, k) += wval*bval;
-      *(npy_float64*)PyArray_GETPTR3(used, i, j, k) = 1.0;
-    }
-
-      Py_DECREF(bins_x); 
-      Py_DECREF(bins_y); 
-      Py_DECREF(bins_z); 
-      Py_DECREF(wsource); 
-      Py_DECREF(bsource); 
-      Py_DECREF(wresult); 
-      Py_DECREF(bresult); 
-      Py_DECREF(used);
-    
-      PyObject *onum_found = PyInt_FromLong((long)1);
-      return onum_found;
-    
-    _fail:
-      Py_XDECREF(bins_x); 
-      Py_XDECREF(bins_y); 
-      Py_XDECREF(bins_z); 
-      Py_XDECREF(wsource); 
-      Py_XDECREF(bsource); 
-      Py_XDECREF(wresult); 
-      Py_XDECREF(bresult); 
-      Py_XDECREF(used);
-      return NULL;
-
-}
-
 static PyObject *_dataCubeError;
 
 static PyObject *DataCubeGeneric(PyObject *obj, PyObject *args,
@@ -1430,7 +1077,7 @@
                     0, NULL);
     if(xi==NULL) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for xi.");
+             "FindContours: One dimension required for xi.");
     goto _fail;
     }
     
@@ -1439,7 +1086,7 @@
                     0, NULL);
     if((yi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(yi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for yi, same size as xi.");
+             "FindContours: One dimension required for yi, same size as xi.");
     goto _fail;
     }
     
@@ -1448,7 +1095,7 @@
                     0, NULL);
     if((zi==NULL) || (PyArray_SIZE(xi) != PyArray_SIZE(zi))) {
     PyErr_Format(_findContoursError,
-             "Bin2DProfile: One dimension required for zi, same size as xi.");
+             "FindContours: One dimension required for zi, same size as xi.");
     goto _fail;
     }
     
@@ -1789,9 +1436,6 @@
     {"Interpolate", Py_Interpolate, METH_VARARGS},
     {"DataCubeRefine", Py_DataCubeRefine, METH_VARARGS},
     {"DataCubeReplace", Py_DataCubeReplace, METH_VARARGS},
-    {"Bin1DProfile", Py_Bin1DProfile, METH_VARARGS},
-    {"Bin2DProfile", Py_Bin2DProfile, METH_VARARGS},
-    {"Bin3DProfile", Py_Bin3DProfile, METH_VARARGS},
     {"FindContours", Py_FindContours, METH_VARARGS},
     {"FindBindingEnergy", Py_FindBindingEnergy, METH_VARARGS},
     {"OutputFloatsToFile", Py_OutputFloatsToFile, METH_VARARGS},
@@ -1816,10 +1460,6 @@
     PyDict_SetItemString(d, "error", _interpolateError);
     _dataCubeError = PyErr_NewException("data_point_utilities.DataCubeError", NULL, NULL);
     PyDict_SetItemString(d, "error", _dataCubeError);
-    _profile2DError = PyErr_NewException("data_point_utilities.Profile2DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile2DError);
-    _profile3DError = PyErr_NewException("data_point_utilities.Profile3DError", NULL, NULL);
-    PyDict_SetItemString(d, "error", _profile3DError);
     _findContoursError = PyErr_NewException("data_point_utilities.FindContoursError", NULL, NULL);
     PyDict_SetItemString(d, "error", _findContoursError);
     _outputFloatsToFileError = PyErr_NewException("data_point_utilities.OutputFloatsToFileError", NULL, NULL);



https://bitbucket.org/yt_analysis/yt/changeset/dbc3109b9cff/
changeset:   dbc3109b9cff
branch:      yt
user:        MatthewTurk
date:        2012-03-13 19:38:13
summary:     Re-enabling storage of _std fields.
affected #:  1 file

diff -r 4b64f344ee99da93c1d9b2325d72056ea253b222 -r dbc3109b9cff7706c6866a6b38a74414c44f00c3 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -121,7 +121,7 @@
                 self.__data[field][ub] /= self.__weight_data[field][ub]
                 self.__std_data[field][ub] /= self.__weight_data[field][ub]
             self[field] = self.__data[field]
-            #self["%s_std" % field] = na.sqrt(self.__std_data[field])
+            self["%s_std" % field] = na.sqrt(self.__std_data[field])
         self["UsedBins"] = self.__used
         del self.__data, self.__std_data, self.__weight_data, self.__used

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list