[yt-svn] commit/yt: 6 new changesets

commits-noreply@bitbucket.org
Thu Jul 9 09:45:39 PDT 2015


6 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/eecd59b9fcbc/
Changeset:   eecd59b9fcbc
Branch:      yt
User:        chummels
Date:        2015-04-08 03:06:11+00:00
Summary:     Updating derived quantities to always provide YTArrays when possible or lists of YTArrays for multiple fields.
Affected #:  1 file

diff -r 0dca6f095d122c4178e74cb42e91d8877cedb5ae -r eecd59b9fcbc2685384e5f1ab5679a3e82e535dc yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -96,6 +96,10 @@
     r"""
     Calculates the weighted average of a field or fields.
 
+    Returns a YTQuantity for each field requested; if one field is
+    requested, a single YTQuantity is returned, and if many, a list of
+    YTQuantities is returned in the order of the listed fields.
+
     Where f is the field and w is the weight, the weighted average is
     Sum_i(f_i \* w_i) / Sum_i(w_i).
 
@@ -173,8 +177,9 @@
 
 class TotalMass(TotalQuantity):
     r"""
-    Calculates the total mass in gas and particles. Returns a tuple where the
-    first part is total gas mass and the second part is total particle mass.
+    Calculates the total mass of the object. Returns a YTArray where the
+    first element is the total gas mass, the second element is the total
+    particle mass, and the third element is the combined gas and particle mass.
 
     Examples
     --------
@@ -189,11 +194,14 @@
         fi = self.data_source.ds.field_info
         fields = []
         if ("gas", "cell_mass") in fi:
-            fields.append(("gas", "cell_mass"))
+            gas = super(TotalMass, self).__call__([('gas', 'cell_mass')])
+        else:
+            gas = self.data_source.ds.arr([0], 'g')
         if ("all", "particle_mass") in fi:
-            fields.append(("all", "particle_mass"))
-        rv = super(TotalMass, self).__call__(fields)
-        return rv
+            part = super(TotalMass, self).__call__([('all', 'particle_mass')])
+        else:
+            part = self.data_source.ds.arr([0], 'g')
+        return self.data_source.ds.arr([gas, part, np.sum([gas,part])])
 
 class CenterOfMass(DerivedQuantity):
     r"""
@@ -330,7 +338,10 @@
 class WeightedVariance(DerivedQuantity):
     r"""
     Calculates the weighted variance and weighted mean for a field
-    or list of fields.
+    or list of fields. Returns a YTArray for each field requested; if one
+    field is requested, a single YTArray is returned, and if many, a list
+    of YTArrays is returned in the order of the listed fields.  The first
+    element of each YTArray is the weighted variance; the second is the weighted mean.
 
     Where f is the field, w is the weight, and <f_w> is the weighted mean,
     the weighted variance is
@@ -384,10 +395,10 @@
             my_mean = values[i]
             my_var2 = values[i + int(len(values) / 2)]
             all_mean = (my_weight * my_mean).sum(dtype=np.float64) / all_weight
-            rvals.append(np.sqrt((my_weight * (my_var2 +
-                                               (my_mean - all_mean)**2)).sum(dtype=np.float64) /
-                                               all_weight))
-            rvals.append(all_mean)
+            rvals.append(self.data_source.ds.arr([(np.sqrt((my_weight * 
+                                                 (my_var2 + (my_mean - 
+                                                  all_mean)**2)).sum(dtype=np.float64) 
+                                                  / all_weight)), all_mean]))
         return rvals
 
 class AngularMomentumVector(DerivedQuantity):
@@ -395,6 +406,7 @@
     Calculates the angular momentum vector, using gas and/or particles.
 
     The angular momentum vector is the mass-weighted mean specific angular momentum.
+    Returns a YTArray of the vector.
 
     Parameters
     ----------
@@ -453,11 +465,15 @@
             jy += values.pop(0).sum(dtype=np.float64)
             jz += values.pop(0).sum(dtype=np.float64)
             m  += values.pop(0).sum(dtype=np.float64)
-        return (jx / m, jy / m, jz / m)
+        return self.data_source.ds.arr([jx / m, jy / m, jz / m])
 
 class Extrema(DerivedQuantity):
     r"""
     Calculates the min and max value of a field or list of fields.
+    Returns a YTArray for each field requested.  If one field is requested,
+    a single YTArray is returned; if many, a list of YTArrays is returned in
+    the order of the listed fields.  The first element of each YTArray is the
+    minimum of the field, and the second is the maximum.
 
     Parameters
     ----------
@@ -500,7 +516,7 @@
 
     def reduce_intermediate(self, values):
         # The values get turned into arrays here.
-        return [(mis.min(), mas.max() )
+        return [self.data_source.ds.arr([mis.min(), mas.max()])
                 for mis, mas in zip(values[::2], values[1::2])]
 
 class MaxLocation(DerivedQuantity):

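[Editor's note: a minimal sketch of the new return types described above, assuming an already-loaded grid dataset; the sample dataset path is illustrative and not part of the commit.]

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative path
    ad = ds.all_data()

    # TotalMass: one YTArray of [gas mass, particle mass, combined mass]
    masses = ad.quantities.total_mass()

    # Extrema: one YTArray of [min, max] per field requested
    dmin, dmax = ad.quantities.extrema(("gas", "density"))

    # WeightedVariance: one YTArray of [weighted variance, weighted mean] per field
    var, mean = ad.quantities.weighted_variance(("gas", "temperature"),
                                                ("gas", "cell_mass"))

    # AngularMomentumVector: a YTArray vector instead of a tuple
    L = ad.quantities.angular_momentum_vector()
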

https://bitbucket.org/yt_analysis/yt/commits/3ecc1bff8a67/
Changeset:   3ecc1bff8a67
Branch:      yt
User:        chummels
Date:        2015-04-08 03:07:09+00:00
Summary:     Fixing a bug in the AngularMomentumVector derived quantity that prevented it from working with certain data objects (e.g., ds.all_data()).
Affected #:  1 file

diff -r eecd59b9fcbc2685384e5f1ab5679a3e82e535dc -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -428,10 +428,6 @@
 
     """
     def count_values(self, use_gas=True, use_particles=True):
-        use_gas &= \
-          (("gas", "cell_mass") in self.data_source.ds.field_info)
-        use_particles &= \
-          (("all", "particle_mass") in self.data_source.ds.field_info)
         num_vals = 0
         if use_gas: num_vals += 4
         if use_particles: num_vals += 4


https://bitbucket.org/yt_analysis/yt/commits/b2c60e683290/
Changeset:   b2c60e683290
Branch:      yt
User:        chummels
Date:        2015-04-08 03:08:48+00:00
Summary:     Merging.
Affected #:  25 files

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,6 +12,7 @@
 yt/analysis_modules/ppv_cube/ppv_utils.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/geometry/fake_octree.c
+yt/geometry/grid_container.c
 yt/geometry/oct_container.c
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
@@ -45,7 +46,6 @@
 yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
-yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 yt/utilities/lib/write_array.c

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -732,6 +732,10 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
+        if hasattr(self, 'magnetic_unit'):
+            # If we do not have this set, but some fields come in in
+            # "code_magnetic", this will allow them to remain in that unit.
+            self.unit_registry.modify("code_magnetic", self.magnetic_unit)
         vel_unit = getattr(
             self, "velocity_unit", self.length_unit / self.time_unit)
         pressure_unit = getattr(

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/frontends/chombo/data_structures.py
--- a/yt/frontends/chombo/data_structures.py
+++ b/yt/frontends/chombo/data_structures.py
@@ -18,7 +18,6 @@
 import os
 import weakref
 import numpy as np
-import six
 
 from stat import \
     ST_CTIME
@@ -26,6 +25,7 @@
 from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
+from yt.extern import six
 from yt.geometry.grid_geometry_handler import \
     GridIndex
 from yt.data_objects.static_output import \
@@ -283,6 +283,7 @@
         self.length_unit = self.quan(1.0, "cm")
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
+        self.magnetic_unit = self.quan(1.0, "gauss")
         self.velocity_unit = self.length_unit / self.time_unit
 
     def _localize(self, f, default):

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -53,9 +53,9 @@
         ("Y-momentum", (mom_units, ["momentum_y"], None)),
         ("Z-momentum", (mom_units, ["momentum_z"], None)),
         ("temperature", ("K", ["temperature"], None)),
-        ("X-magnfield", ("gauss", ["magnetic_field_x"], None)),
-        ("Y-magnfield", ("gauss", ["magnetic_field_y"], None)),
-        ("Z-magnfield", ("gauss", ["magnetic_field_z"], None)),
+        ("X-magnfield", (b_units, ["magnetic_field_x"], None)),
+        ("Y-magnfield", (b_units, ["magnetic_field_y"], None)),
+        ("Z-magnfield", (b_units, ["magnetic_field_z"], None)),
     )
 
     known_particle_fields = (
@@ -120,7 +120,7 @@
 
         def _get_vel(axis):
             def velocity(field, data):
-                return data["momentum_%s" % ax]/data["density"]
+                return data["momentum_%s" % axis]/data["density"]
             return velocity
 
         for ax in 'xyz':

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -58,7 +58,7 @@
     FieldInfoContainer, NullFunc
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level
-from yt.utilities.lib.GridTree import \
+from yt.geometry.grid_container import \
     GridTree, \
     MatchPointsToGrids
 from yt.utilities.decompose import \
@@ -435,6 +435,7 @@
         grid_tree = GridTree(num_grids, 
                              ds.stream_handler.left_edges,
                              ds.stream_handler.right_edges,
+                             ds.stream_handler.dimensions,
                              ds.stream_handler.parent_ids,
                              levels, num_children)
 

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -285,20 +285,21 @@
 class YTDataChunk(object):
 
     def __init__(self, dobj, chunk_type, objs, data_size = None,
-                 field_type = None, cache = False):
+                 field_type = None, cache = False, fast_index = None):
         self.dobj = dobj
         self.chunk_type = chunk_type
         self.objs = objs
         self.data_size = data_size
         self._field_type = field_type
         self._cache = cache
+        self._fast_index = fast_index
 
     def _accumulate_values(self, method):
         # We call this generically.  It's somewhat slower, since we're doing
         # costly getattr functions, but this allows us to generalize.
         mname = "select_%s" % method
         arrs = []
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             f = getattr(obj, mname)
             arrs.append(f(self.dobj))
         if method == "dtcoords":
@@ -311,12 +312,18 @@
 
     @cached_property
     def fcoords(self):
+        if self._fast_index is not None:
+            ci = self._fast_index.select_fcoords(
+                self.dobj.selector, self.data_size)
+            ci = YTArray(ci, input_units = "code_length",
+                         registry = self.dobj.ds.unit_registry)
+            return ci
         ci = np.empty((self.data_size, 3), dtype='float64')
         ci = YTArray(ci, input_units = "code_length",
                      registry = self.dobj.ds.unit_registry)
         if self.data_size == 0: return ci
         ind = 0
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             c = obj.select_fcoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
@@ -325,10 +332,14 @@
 
     @cached_property
     def icoords(self):
+        if self._fast_index is not None:
+            ci = self._fast_index.select_icoords(
+                self.dobj.selector, self.data_size)
+            return ci
         ci = np.empty((self.data_size, 3), dtype='int64')
         if self.data_size == 0: return ci
         ind = 0
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             c = obj.select_icoords(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
@@ -337,12 +348,18 @@
 
     @cached_property
     def fwidth(self):
+        if self._fast_index is not None:
+            ci = self._fast_index.select_fwidth(
+                self.dobj.selector, self.data_size)
+            ci = YTArray(ci, input_units = "code_length",
+                         registry = self.dobj.ds.unit_registry)
+            return ci
         ci = np.empty((self.data_size, 3), dtype='float64')
         ci = YTArray(ci, input_units = "code_length",
                      registry = self.dobj.ds.unit_registry)
         if self.data_size == 0: return ci
         ind = 0
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             c = obj.select_fwidth(self.dobj)
             if c.shape[0] == 0: continue
             ci[ind:ind+c.shape[0], :] = c
@@ -351,10 +368,14 @@
 
     @cached_property
     def ires(self):
+        if self._fast_index is not None:
+            ci = self._fast_index.select_ires(
+                self.dobj.selector, self.data_size)
+            return ci
         ci = np.empty(self.data_size, dtype='int64')
         if self.data_size == 0: return ci
         ind = 0
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             c = obj.select_ires(self.dobj)
             if c.shape == 0: continue
             ci[ind:ind+c.size] = c
@@ -373,7 +394,7 @@
          self._tcoords = ct # Set this for tcoords
         if self.data_size == 0: return cdt
         ind = 0
-        for obj in self.objs:
+        for obj in self._fast_index or self.objs:
             gdt, gt = obj.select_tcoords(self.dobj)
             if gt.shape == 0: continue
             ct[ind:ind+gt.size] = gt

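[Editor's note: the "self._fast_index or self.objs" idiom above works because GridTree, added below, defines __iter__ to yield itself: when a fast index is present, each accumulation loop runs exactly once against the whole tree instead of once per grid. A toy illustration of the pattern; the class and names here are hypothetical.]

    class TreeIndex:
        """Stands in for GridTree: iterating over it yields the tree itself."""
        def __iter__(self):
            yield self

        def select_fcoords(self, dobj):
            return "all coordinates in a single tree-wide visit"

    objs = ["grid0", "grid1", "grid2"]
    fast_index = TreeIndex()

    # With a fast index, the loop body executes once, against the tree;
    # with fast_index = None, it falls back to the per-grid list.
    for obj in (fast_index or objs):
        print(obj)
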
diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/grid_container.pxd
--- /dev/null
+++ b/yt/geometry/grid_container.pxd
@@ -0,0 +1,65 @@
+"""
+Matching points on the grid to specific grids
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from libc.stdlib cimport malloc, free
+from libc.math cimport nearbyint, rint
+from yt.geometry.selection_routines cimport SelectorObject, _ensure_code
+from yt.utilities.lib.fp_utils cimport iclip
+from grid_visitors cimport GridTreeNode, GridVisitorData, grid_visitor_function
+cimport grid_visitors 
+from yt.utilities.lib.bitarray cimport bitarray
+
+cdef class GridTree:
+    cdef GridTreeNode *grids
+    cdef GridTreeNode *root_grids
+    cdef int num_grids
+    cdef int num_root_grids
+    cdef int num_leaf_grids
+    cdef public bitarray mask
+    cdef void setup_data(self, GridVisitorData *data)
+    cdef void visit_grids(self, GridVisitorData *data,
+                          grid_visitor_function *func,
+                          SelectorObject selector)
+    cdef void recursively_visit_grid(self,
+                          GridVisitorData *data,
+                          grid_visitor_function *func,
+                          SelectorObject selector,
+                          GridTreeNode *grid,
+                          np.uint8_t *buf = ?)
+
+cdef class MatchPointsToGrids:
+
+    cdef int num_points
+    cdef np.float64_t *xp
+    cdef np.float64_t *yp
+    cdef np.float64_t *zp
+    cdef GridTree tree
+    cdef np.int64_t *point_grids
+    cdef np.uint8_t check_position(self,
+                                   np.int64_t pt_index, 
+                                   np.float64_t x,
+                                   np.float64_t y,
+                                   np.float64_t z,
+                                   GridTreeNode *grid)
+
+    cdef np.uint8_t is_in_grid(self,
+			 np.float64_t x,
+			 np.float64_t y,
+			 np.float64_t z,
+			 GridTreeNode *grid)

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/grid_container.pyx
--- /dev/null
+++ b/yt/geometry/grid_container.pyx
@@ -0,0 +1,333 @@
+"""
+Matching points on the grid to specific grids
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.math cimport rint
+from yt.utilities.lib.bitarray cimport bitarray
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef GridTreeNode Grid_initialize(np.ndarray[np.float64_t, ndim=1] le,
+                                  np.ndarray[np.float64_t, ndim=1] re,
+                                  np.ndarray[np.int32_t, ndim=1] dims,
+                                  int num_children, int level, int index):
+
+    cdef GridTreeNode node
+    cdef int i
+
+    node.index = index
+    node.level = level
+    for i in range(3):
+        node.left_edge[i] = le[i]
+        node.right_edge[i] = re[i]
+        node.dims[i] = dims[i]
+        node.dds[i] = (re[i] - le[i])/dims[i]
+        node.start_index[i] = <np.int64_t> rint(le[i] / node.dds[i])
+    node.num_children = num_children
+    if num_children <= 0:
+        node.children = NULL
+        return node
+    node.children = <GridTreeNode **> malloc(
+            sizeof(GridTreeNode *) * num_children)
+    for i in range(num_children):
+        node.children[i] = NULL
+
+    return node
+
+cdef class GridTree:
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, int num_grids, 
+                  np.ndarray[np.float64_t, ndim=2] left_edge,
+                  np.ndarray[np.float64_t, ndim=2] right_edge,
+                  np.ndarray[np.int32_t, ndim=2] dimensions,
+                  np.ndarray[np.int64_t, ndim=1] parent_ind,
+                  np.ndarray[np.int64_t, ndim=1] level,
+                  np.ndarray[np.int64_t, ndim=1] num_children):
+
+        cdef int i, j, k
+        cdef np.ndarray[np.int64_t, ndim=1] child_ptr
+
+        child_ptr = np.zeros(num_grids, dtype='int64')
+
+        self.num_grids = num_grids
+        self.num_root_grids = 0
+        self.num_leaf_grids = 0
+        
+        self.grids = <GridTreeNode *> malloc(
+                sizeof(GridTreeNode) * num_grids)
+                
+        for i in range(num_grids):
+            self.grids[i] = Grid_initialize(left_edge[i,:],
+                                            right_edge[i,:],
+                                            dimensions[i,:],
+                                            num_children[i],
+                                            level[i], i)
+            if level[i] == 0:
+                self.num_root_grids += 1
+            if num_children[i] == 0:
+                self.num_leaf_grids += 1
+
+        self.root_grids = <GridTreeNode *> malloc(
+                sizeof(GridTreeNode) * self.num_root_grids)
+        k = 0
+        for i in range(num_grids):
+            j = parent_ind[i]
+            if j >= 0:
+                self.grids[j].children[child_ptr[j]] = &self.grids[i]
+                child_ptr[j] += 1
+            else:
+                if k >= self.num_root_grids:
+                    raise RuntimeError
+                self.root_grids[k] = self.grids[i] 
+                k = k + 1
+
+    def __init__(self, *args, **kwargs):
+        self.mask = None
+
+    def __iter__(self):
+        yield self
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def return_tree_info(self):
+        cdef int i, j
+        levels = []
+        indices = []
+        nchild = []
+        children = []
+        for i in range(self.num_grids): 
+            childs = []
+            levels.append(self.grids[i].level)
+            indices.append(self.grids[i].index)
+            nchild.append(self.grids[i].num_children)
+            for j in range(self.grids[i].num_children):
+                childs.append(self.grids[i].children[j].index)
+            children.append(childs)
+        return indices, levels, nchild, children
+
+    cdef void setup_data(self, GridVisitorData *data):
+        # Being handed a new GVD object, we initialize it to sane defaults.
+        data.index = 0
+        data.global_index = 0
+        data.n_tuples = 0
+        data.child_tuples = NULL
+        data.array = NULL
+        data.ref_factor = 2 #### FIX THIS
+
+    cdef void visit_grids(self, GridVisitorData *data,
+                          grid_visitor_function *func,
+                          SelectorObject selector):
+        # This iterates over all root grids, given a selector+data, and then
+        # visits each one and its children.
+        cdef int i, n
+        # Because of confusion about mapping of children to parents, we are
+        # going to do this the stupid way for now.
+        cdef GridTreeNode *grid
+        cdef np.uint8_t *buf = NULL
+        if self.mask is not None:
+            buf = self.mask.buf
+        for i in range(self.num_root_grids):
+            grid = &self.root_grids[i]
+            self.recursively_visit_grid(data, func, selector, grid, buf)
+        grid_visitors.free_tuples(data)
+
+    cdef void recursively_visit_grid(self, GridVisitorData *data,
+                                     grid_visitor_function *func,
+                                     SelectorObject selector,
+                                     GridTreeNode *grid,
+                                     np.uint8_t *buf = NULL):
+        # Visit this grid and all of its child grids, with a given grid visitor
+        # function.  We early terminate if we are not selected by the selector.
+        cdef int i
+        data.grid = grid
+        if selector.select_bbox(grid.left_edge, grid.right_edge) == 0:
+            # Note that this does not increment the global_index.
+            return
+        grid_visitors.setup_tuples(data)
+        selector.visit_grid_cells(data, func, buf)
+        for i in range(grid.num_children):
+            self.recursively_visit_grid(data, func, selector, grid.children[i],
+                                        buf)
+
+    def count(self, SelectorObject selector):
+        # Use the counting grid visitor
+        cdef GridVisitorData data
+        self.setup_data(&data)
+        cdef np.uint64_t size = 0
+        cdef int i
+        for i in range(self.num_grids):
+            size += (self.grids[i].dims[0] *
+                     self.grids[i].dims[1] *
+                     self.grids[i].dims[2])
+        cdef bitarray mask = bitarray(size)
+        data.array = <void*>mask.buf
+        self.visit_grids(&data, grid_visitors.mask_cells, selector)
+        self.mask = mask
+        size = 0
+        self.setup_data(&data)
+        data.array = <void*>(&size)
+        self.visit_grids(&data,  grid_visitors.count_cells, selector)
+        return size
+
+    def select_icoords(self, SelectorObject selector, np.uint64_t size = -1):
+        # Fill icoords with a selector
+        cdef GridVisitorData data
+        self.setup_data(&data)
+        if size == -1:
+            size = 0
+            data.array = <void*>(&size)
+            self.visit_grids(&data,  grid_visitors.count_cells, selector)
+        cdef np.ndarray[np.int64_t, ndim=2] icoords 
+        icoords = np.empty((size, 3), dtype="int64")
+        data.array = icoords.data
+        self.visit_grids(&data, grid_visitors.icoords_cells, selector)
+        return icoords
+
+    def select_ires(self, SelectorObject selector, np.uint64_t size = -1):
+        # Fill ires with a selector
+        cdef GridVisitorData data
+        self.setup_data(&data)
+        if size == -1:
+            size = 0
+            data.array = <void*>(&size)
+            self.visit_grids(&data,  grid_visitors.count_cells, selector)
+        cdef np.ndarray[np.int64_t, ndim=1] ires 
+        ires = np.empty(size, dtype="int64")
+        data.array = ires.data
+        self.visit_grids(&data, grid_visitors.ires_cells, selector)
+        return ires
+
+    def select_fcoords(self, SelectorObject selector, np.uint64_t size = -1):
+        # Fill fcoords with a selector
+        cdef GridVisitorData data
+        self.setup_data(&data)
+        if size == -1:
+            size = 0
+            data.array = <void*>(&size)
+            self.visit_grids(&data,  grid_visitors.count_cells, selector)
+        cdef np.ndarray[np.float64_t, ndim=2] fcoords 
+        fcoords = np.empty((size, 3), dtype="float64")
+        data.array = fcoords.data
+        self.visit_grids(&data, grid_visitors.fcoords_cells, selector)
+        return fcoords
+
+    def select_fwidth(self, SelectorObject selector, np.uint64_t size = -1):
+        # Fill fwidth with a selector
+        cdef GridVisitorData data
+        self.setup_data(&data)
+        if size == -1:
+            size = 0
+            data.array = <void*>(&size)
+            self.visit_grids(&data,  grid_visitors.count_cells, selector)
+        cdef np.ndarray[np.float64_t, ndim=2] fwidth 
+        fwidth = np.empty((size, 3), dtype="float64")
+        data.array = fwidth.data
+        self.visit_grids(&data, grid_visitors.fwidth_cells, selector)
+        return fwidth
+    
+cdef class MatchPointsToGrids:
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __cinit__(self, GridTree tree,
+                  int num_points, 
+                  np.ndarray[np.float64_t, ndim=1] x,
+                  np.ndarray[np.float64_t, ndim=1] y,
+                  np.ndarray[np.float64_t, ndim=1] z):
+
+        cdef int i
+        
+        self.num_points = num_points
+        self.xp = <np.float64_t *> malloc(
+                sizeof(np.float64_t) * num_points)
+        self.yp = <np.float64_t *> malloc(
+                sizeof(np.float64_t) * num_points)
+        self.zp = <np.float64_t *> malloc(
+                sizeof(np.float64_t) * num_points)
+        self.point_grids = <np.int64_t *> malloc(
+                sizeof(np.int64_t) * num_points)
+        for i in range(num_points):
+            self.xp[i] = x[i]
+            self.yp[i] = y[i]
+            self.zp[i] = z[i]
+            self.point_grids[i] = -1
+        self.tree = tree
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def find_points_in_tree(self):
+        cdef np.ndarray[np.int64_t, ndim=1] pt_grids
+        cdef int i, j
+        cdef np.uint8_t in_grid
+        pt_grids = np.zeros(self.num_points, dtype='int64')
+        for i in range(self.num_points):
+            in_grid = 0
+            for j in range(self.tree.num_root_grids):
+                if not in_grid: 
+                    in_grid = self.check_position(i, self.xp[i], self.yp[i], self.zp[i],
+                                                  &self.tree.root_grids[j])
+        for i in range(self.num_points):
+            pt_grids[i] = self.point_grids[i]
+        return pt_grids
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    cdef np.uint8_t check_position(self,
+                                   np.int64_t pt_index, 
+                                   np.float64_t x,
+                                   np.float64_t y,
+                                   np.float64_t z,
+                                   GridTreeNode * grid):
+        cdef int i
+        cdef np.uint8_t in_grid
+        in_grid = self.is_in_grid(x, y, z, grid)
+        if in_grid:
+            if grid.num_children > 0:
+                in_grid = 0
+                for i in range(grid.num_children):
+                    if not in_grid:
+                        in_grid = self.check_position(pt_index, x, y, z, grid.children[i])
+                if not in_grid:
+                    self.point_grids[pt_index] = grid.index
+                    in_grid = 1
+            else:
+                self.point_grids[pt_index] = grid.index
+                in_grid = 1
+        return in_grid
+    
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef np.uint8_t is_in_grid(self,
+             np.float64_t x,
+             np.float64_t y,
+             np.float64_t z,
+             GridTreeNode * grid):
+        if x >= grid.right_edge[0]: return 0
+        if y >= grid.right_edge[1]: return 0
+        if z >= grid.right_edge[2]: return 0
+        if x < grid.left_edge[0]: return 0
+        if y < grid.left_edge[1]: return 0
+        if z < grid.left_edge[2]: return 0
+        return 1
+

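[Editor's note: a minimal sketch of driving the new GridTree directly, assuming a grid-based dataset ds; in normal use the chunking system in grid_geometry_handler.py (next diff) calls these methods for you.]

    sp = ds.sphere("c", (0.25, "unitary"))        # any selector-bearing data object
    tree = ds.index._get_grid_tree()              # build the Cython GridTree

    size = tree.count(sp.selector)                # counts cells, caching a bit mask
    icoords = tree.select_icoords(sp.selector, size)  # (size, 3) integer coords
    fcoords = tree.select_fcoords(sp.selector, size)  # cell centers (code_length floats)
    fwidth = tree.select_fwidth(sp.selector, size)    # cell widths
    ires = tree.select_ires(sp.selector, size)        # per-cell refinement levels
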
diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -35,7 +35,8 @@
 from yt.utilities.io_handler import io_registry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface
-from yt.utilities.lib.GridTree import GridTree, MatchPointsToGrids
+from .grid_container import \
+    GridTree, MatchPointsToGrids
 
 from yt.data_objects.data_containers import data_object_registry
 
@@ -248,7 +249,7 @@
         ind = pts.find_points_in_tree()
         return self.grids[ind], ind
 
-    def _get_grid_tree(self) :
+    def _get_grid_tree(self):
 
         left_edge = self.ds.arr(np.zeros((self.num_grids, 3)),
                                'code_length')
@@ -257,6 +258,7 @@
         level = np.zeros((self.num_grids), dtype='int64')
         parent_ind = np.zeros((self.num_grids), dtype='int64')
         num_children = np.zeros((self.num_grids), dtype='int64')
+        dimensions = np.zeros((self.num_grids, 3), dtype="int32")
 
         for i, grid in enumerate(self.grids) :
 
@@ -268,14 +270,16 @@
             else :
                 parent_ind[i] = grid.Parent.id - grid.Parent._id_offset
             num_children[i] = np.int64(len(grid.Children))
+            dimensions[i,:] = grid.ActiveDimensions
 
-        return GridTree(self.num_grids, left_edge, right_edge, parent_ind,
-                        level, num_children)
+        return GridTree(self.num_grids, left_edge, right_edge, dimensions,
+                        parent_ind, level, num_children)
 
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 
     def _identify_base_chunk(self, dobj):
+        fast_index = None
         def _gsort(g):
             if g.filename is None:
                 return g.id
@@ -291,20 +295,29 @@
             dobj._chunk_info = np.empty(len(grids), dtype='object')
             for i, g in enumerate(grids):
                 dobj._chunk_info[i] = g
+        # These next two lines, when uncommented, turn "on" the fast index.
+        #if dobj._type_name != "grid":
+        #    fast_index = self._get_grid_tree()
         if getattr(dobj, "size", None) is None:
-            dobj.size = self._count_selection(dobj)
+            dobj.size = self._count_selection(dobj, fast_index = fast_index)
         if getattr(dobj, "shape", None) is None:
             dobj.shape = (dobj.size,)
-        dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
+        dobj._current_chunk = list(self._chunk_all(dobj, cache = False,
+                                   fast_index = fast_index))[0]
 
-    def _count_selection(self, dobj, grids = None):
+    def _count_selection(self, dobj, grids = None, fast_index = None):
+        if fast_index is not None:
+            return fast_index.count(dobj.selector)
         if grids is None: grids = dobj._chunk_info
         count = sum((g.count(dobj.selector) for g in grids))
         return count
 
-    def _chunk_all(self, dobj, cache = True):
+    def _chunk_all(self, dobj, cache = True, fast_index = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
+        fast_index = fast_index or getattr(dobj._current_chunk, "_fast_index",
+            None)
+        yield YTDataChunk(dobj, "all", gobjs, dobj.size, 
+                        cache, fast_index = fast_index)
         
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -339,6 +352,7 @@
         preload_fields, _ = self._split_fields(preload_fields)
         gfiles = defaultdict(list)
         gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        fast_index = dobj._current_chunk._fast_index
         for g in gobjs:
             gfiles[g.filename].append(g)
         for fn in sorted(gfiles):
@@ -351,7 +365,7 @@
                           in range(0, len(gs), size)):
                 dc = YTDataChunk(dobj, "io", grids,
                         self._count_selection(dobj, grids),
-                        cache = cache)
+                        cache = cache, fast_index = fast_index)
                 # We allow four full chunks to be included.
                 with self.io.preload(dc, preload_fields, 
                             4.0 * self._grid_chunksize):

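[Editor's note: this merge lands with the fast index disabled. Per the commented lines in _identify_base_chunk above, enabling it amounts to the following; this is the commit's own commented-out code, not an independent addition.]

    # In _identify_base_chunk, before the size/shape checks:
    if dobj._type_name != "grid":
        fast_index = self._get_grid_tree()   # build the tree once per base chunk
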
diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/grid_visitors.pxd
--- /dev/null
+++ b/yt/geometry/grid_visitors.pxd
@@ -0,0 +1,63 @@
+"""
+Grid visitor definitions file
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+
+cdef struct GridTreeNode:
+    int num_children
+    int level
+    np.int64_t index
+    np.float64_t left_edge[3]
+    np.float64_t right_edge[3]
+    GridTreeNode **children
+    np.int64_t start_index[3]
+    int dims[3]
+    np.float64_t dds[3]
+
+cdef struct GridVisitorData:
+    GridTreeNode *grid
+    np.uint64_t index
+    np.uint64_t global_index
+    np.int64_t pos[3]       # position in ints
+    int n_tuples
+    int **child_tuples # [N_child][6], where 0-1 are x_start, x_end, etc.
+    void *array
+    int ref_factor # This may change on a grid-by-grid basis
+                   # It is the number of cells a child grid has per dimension
+                   # in a cell of this grid.
+                            
+cdef void free_tuples(GridVisitorData *data) nogil
+cdef void setup_tuples(GridVisitorData *data) nogil
+cdef np.uint8_t check_child_masked(GridVisitorData *data) nogil
+
+ctypedef void grid_visitor_function(GridVisitorData *data,
+                                         np.uint8_t selected) nogil
+# This is similar in spirit to the way oct visitor functions work.  However,
+# there are a few important differences.  Because the grid objects are expected
+# to be bigger, we don't need to pass them along -- we will not be recursively
+# visiting.  So the GridVisitorData will be updated in between grids.
+# Furthermore, we're only going to use them for a much smaller subset of
+# operations.  All child mask evaluation is going to be conducted inside the
+# outermost level of the visitor function, and visitor functions will receive
+# information about whether they have been selected and whether they are
+# covered by child cells.
+
+cdef grid_visitor_function count_cells
+cdef grid_visitor_function mask_cells
+cdef grid_visitor_function icoords_cells
+cdef grid_visitor_function ires_cells
+cdef grid_visitor_function fcoords_cells
+cdef grid_visitor_function fwidth_cells

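[Editor's note: in plain-Python terms, the visitor protocol declared here and implemented in grid_container.pyx works roughly as sketched below; cell_positions and child_masked are hypothetical stand-ins for the real Cython loops.]

    def visit_grids(tree, func, selector, data):
        # Visit every root grid and, recursively, its children.
        for grid in tree.root_grids:
            visit(grid, func, selector, data)

    def visit(grid, func, selector, data):
        if not selector.select_bbox(grid.left_edge, grid.right_edge):
            return  # early termination: nothing under this grid is selected
        for pos in grid.cell_positions():  # hypothetical per-cell iterator
            selected = (not grid.child_masked(pos)) and selector.select_cell(pos)
            func(data, selected)           # the visitor sees every cell exactly once
        for child in grid.children:
            visit(child, func, selector, data)
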
diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/grid_visitors.pyx
--- /dev/null
+++ b/yt/geometry/grid_visitors.pyx
@@ -0,0 +1,147 @@
+"""
+Grid visitor functions
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free
+from yt.utilities.lib.fp_utils cimport iclip
+from yt.utilities.lib.bitarray cimport ba_set_value, ba_get_value
+
+cdef void free_tuples(GridVisitorData *data) nogil:
+    # This wipes out the tuples, which is necessary since they are
+    # heap-allocated
+    cdef int i
+    if data.child_tuples == NULL: return
+    for i in range(data.n_tuples):
+        free(data.child_tuples[i])
+    free(data.child_tuples)
+    data.child_tuples = NULL
+    data.n_tuples = 0
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void setup_tuples(GridVisitorData *data) nogil:
+    # This sets up child-mask tuples.  Rather than a single mask that covers
+    # everything, we instead allocate pairs of integers that are start/stop
+    # positions for child masks.  This may not be considerably more efficient
+    # memory-wise, but it is easier to keep and save when going through
+    # multiple grids and selectors.
+    cdef int i, j, k
+    cdef np.int64_t si, ei
+    cdef GridTreeNode *g, *c
+    free_tuples(data)
+    g = data.grid
+    data.child_tuples = <int**> malloc(sizeof(int*) * g.num_children)
+    for i in range(g.num_children):
+        c = g.children[i]
+        data.child_tuples[i] = <int *>malloc(sizeof(int) * 6)
+        # Now we fill them in
+        for j in range(3):
+            si = (c.start_index[j] / data.ref_factor) - g.start_index[j]
+            ei = si + c.dims[j]/data.ref_factor - 1
+            data.child_tuples[i][j*2+0] = iclip(si, 0, g.dims[j] - 1)
+            data.child_tuples[i][j*2+1] = iclip(ei, 0, g.dims[j] - 1)
+    data.n_tuples = g.num_children
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef np.uint8_t check_child_masked(GridVisitorData *data) nogil:
+    # This simply checks if we're inside any of the tuples.  Probably not the
+    # most efficient way, but the GVD* passed in has a position affiliated with
+    # it, and we can very easily look for that inside here.
+    cdef int i, j, k
+    cdef int *tup
+    for i in range(data.n_tuples):
+        # k is if we're inside a given child tuple.  We check each one
+        # individually, and invalidate if we're outside.
+        k = 1
+        tup = data.child_tuples[i]
+        for j in range(3):
+            # Check if pos is outside in any of the three dimensions
+            if data.pos[j] < tup[j*2+0] or data.pos[j] > tup[j*2+1]:
+                k = 0
+                break
+        if k == 1: return 1 # Return 1 for child masked
+    return 0 # Only return 0 if it doesn't match any of the children
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void count_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Simply increment for each one, if we've selected it.
+    if selected == 0: return
+    cdef np.uint64_t *count = <np.uint64_t*> data.array
+    count[0] += 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void mask_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Set our bitarray -- we're creating a mask -- if we are selected.
+    if selected == 0: return
+    cdef np.uint8_t *mask = <np.uint8_t*> data.array
+    ba_set_value(mask, data.global_index, 1)
+    # No need to increment anything.
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void icoords_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Nice and easy icoord setter.
+    if selected == 0: return
+    cdef int i
+    cdef np.int64_t *icoords = <np.int64_t*> data.array 
+    for i in range(3):
+        icoords[data.index * 3 + i] = data.pos[i] + data.grid.start_index[i]
+    data.index += 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void ires_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Fill with the level value.
+    if selected == 0: return
+    cdef int i
+    cdef np.int64_t *ires = <np.int64_t*> data.array
+    ires[data.index] = data.grid.level
+    data.index += 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void fwidth_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Fill with our dds.
+    if selected == 0: return
+    cdef int i
+    cdef np.float64_t *fwidth = <np.float64_t*> data.array 
+    for i in range(3):
+        fwidth[data.index * 3 + i] = data.grid.dds[i]
+    data.index += 1
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+cdef void fcoords_cells(GridVisitorData *data, np.uint8_t selected) nogil:
+    # Simple cell-centered position filling.
+    if selected == 0: return
+    cdef int i
+    cdef np.float64_t *fcoords = <np.float64_t*> data.array 
+    for i in range(3):
+        fcoords[data.index * 3 + i] = data.grid.left_edge[i] + \
+            (0.5 + data.pos[i])*data.grid.dds[i]
+    data.index += 1

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/object_finding_mixin.py
--- a/yt/geometry/object_finding_mixin.py
+++ b/yt/geometry/object_finding_mixin.py
@@ -20,7 +20,7 @@
 from yt.utilities.lib.misc_utilities import \
     get_box_grids_level, \
     get_box_grids_below_level
-from yt.utilities.lib.GridTree import \
+from yt.geometry.grid_container import \
     MatchPointsToGrids, \
     GridTree
 from yt.utilities.physical_constants import \
@@ -271,6 +271,7 @@
         level = np.zeros((self.num_grids), dtype='int64')
         parent_ind = np.zeros((self.num_grids), dtype='int64')
         num_children = np.zeros((self.num_grids), dtype='int64')
+        dimensions = np.zeros((self.num_grids, 3), dtype="int32")
 
         for i, grid in enumerate(self.grids) :
 
@@ -282,6 +283,7 @@
             else :
                 parent_ind[i] = grid.Parent.id - grid.Parent._id_offset
             num_children[i] = np.int64(len(grid.Children))
+            dimensions[i,:] = grid.ActiveDimensions
 
-        return GridTree(self.num_grids, left_edge, right_edge, parent_ind,
-                        level, num_children)
+        return GridTree(self.num_grids, left_edge, right_edge, dimensions,
+                        parent_ind, level, num_children)

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -303,6 +303,7 @@
     @cython.cdivision(True)
     cdef void _mask_positions(self, np.ndarray[anyfloat, ndim=2] pos,
                               np.uint64_t file_id, int filter):
+        # TODO: Replace with the bitarray
         cdef np.int64_t no = pos.shape[0]
         cdef np.int64_t p
         cdef int ind[3]

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/selection_routines.pxd
--- a/yt/geometry/selection_routines.pxd
+++ b/yt/geometry/selection_routines.pxd
@@ -17,11 +17,20 @@
 cimport numpy as np
 from oct_visitors cimport Oct, OctVisitorData, \
     oct_visitor_function
+from grid_visitors cimport GridTreeNode, GridVisitorData, \
+    grid_visitor_function, check_child_masked
 
 ctypedef fused anyfloat:
     np.float32_t
     np.float64_t
 
+cdef inline _ensure_code(arr):
+    if hasattr(arr, "units"):
+        if "code_length" == str(arr.units):
+            return arr
+        arr.convert_to_units("code_length")
+    return arr
+
 cdef class SelectorObject:
     cdef public np.int32_t min_level
     cdef public np.int32_t max_level
@@ -47,6 +56,14 @@
     cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil
+    cdef int fill_mask_selector(self, np.float64_t left_edge[3],
+                                np.float64_t right_edge[3], 
+                                np.float64_t dds[3], int dim[3],
+                                np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask,
+                                np.ndarray[np.uint8_t, ndim=3] mask,
+                                int level)
+    cdef void visit_grid_cells(self, GridVisitorData *data,
+                    grid_visitor_function *func, np.uint8_t *cached_mask = ?)
 
     # compute periodic distance (if periodicity set) assuming 0->domain_width[i] coordinates
     cdef np.float64_t difference(self, np.float64_t x1, np.float64_t x2, int d) nogil

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -24,6 +24,7 @@
 from .oct_visitors cimport cind
 from yt.utilities.lib.grid_traversal cimport \
     VolumeContainer, sample_function, walk_volume
+from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value
 
 cdef extern from "math.h":
     double exp(double x) nogil
@@ -52,13 +53,6 @@
 # These all respect the interface "dobj" and a set of left_edges, right_edges,
 # sometimes also accepting level and mask information.
 
-def _ensure_code(arr):
-    if hasattr(arr, "units"):
-        if "code_length" == str(arr.units):
-            return arr
-        arr.convert_to_units("code_length")
-    return arr
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 @cython.cdivision(True)
@@ -431,7 +425,6 @@
         cdef np.ndarray[np.float64_t, ndim=1] oright_edge = gobj.RightEdge.d
         cdef int i, j, k
         cdef np.float64_t dds[3]
-        cdef np.float64_t pos[3]
         cdef np.float64_t left_edge[3]
         cdef np.float64_t right_edge[3]
         for i in range(3):
@@ -440,14 +433,29 @@
             left_edge[i] = oleft_edge[i]
             right_edge[i] = oright_edge[i]
         mask = np.zeros(gobj.ActiveDimensions, dtype='uint8')
-        cdef int total = 0
-        cdef int temp
         # Check for the level bounds
         cdef np.int32_t level = gobj.Level
+        # We set this to 1 if we ignore child_mask
+        cdef int total
+        total = self.fill_mask_selector(left_edge, right_edge, dds, dim,
+                                        child_mask, mask, level)
+        if total == 0: return None
+        return mask.astype("bool")
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef int fill_mask_selector(self, np.float64_t left_edge[3],
+                                np.float64_t right_edge[3], 
+                                np.float64_t dds[3], int dim[3],
+                                np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask,
+                                np.ndarray[np.uint8_t, ndim=3] mask,
+                                int level):
+        cdef int i, j, k
+        cdef int total = 0, this_level = 0
+        cdef np.float64_t pos[3]
         if level < self.min_level or level > self.max_level:
-            return mask.astype("bool")
-        # We set this to 1 if we ignore child_mask
-        cdef int this_level = 0
+            return 0
         if level == self.max_level:
             this_level = 1
         with nogil:
@@ -463,8 +471,68 @@
                         pos[2] += dds[2]
                     pos[1] += dds[1]
                 pos[0] += dds[0]
-        if total == 0: return None
-        return mask.astype("bool")
+        return total
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    cdef void visit_grid_cells(self, GridVisitorData *data,
+                              grid_visitor_function *func,
+                              np.uint8_t *cached_mask = NULL):
+        # This function accepts a grid visitor function, the data that
+        # corresponds to the current grid being examined (the most important
+        # aspect of which is the .grid attribute, along with index values and
+        # void* pointers to arrays) and a possibly-pre-generated cached mask.
+        # Each cell is visited with the grid visitor function.
+        cdef np.float64_t left_edge[3], right_edge[3]
+        cdef np.float64_t dds[3]
+        cdef int dim[3], level, i
+        cdef int total = 0, this_level = 0
+        cdef np.float64_t pos[3]
+        level = data.grid.level
+        if level < self.min_level or level > self.max_level:
+            return
+        if level == self.max_level:
+            this_level = 1
+        cdef np.uint8_t child_masked, selected
+        for i in range(3):
+            left_edge[i] = data.grid.left_edge[i]
+            right_edge[i] = data.grid.right_edge[i]
+            dds[i] = (right_edge[i] - left_edge[i])/data.grid.dims[i]
+            dim[i] = data.grid.dims[i]
+        with nogil:
+            pos[0] = left_edge[0] + dds[0] * 0.5
+            data.pos[0] = 0
+            for i in range(dim[0]):
+                pos[1] = left_edge[1] + dds[1] * 0.5
+                data.pos[1] = 0
+                for j in range(dim[1]):
+                    pos[2] = left_edge[2] + dds[2] * 0.5
+                    data.pos[2] = 0
+                    for k in range(dim[2]):
+                        # We short-circuit if we have a cache; if we don't, we
+                        # only set selected to true if it's *not* masked by a
+                        # child and it *is* selected.
+                        if cached_mask != NULL:
+                            selected = ba_get_value(cached_mask,
+                                                    data.global_index)
+                        else:
+                            if this_level == 1:
+                                child_masked = 0
+                            else:
+                                child_masked = check_child_masked(data)
+                            if child_masked == 0:
+                                selected = self.select_cell(pos, dds)
+                            else:
+                                selected = 0
+                        func(data, selected)
+                        data.global_index += 1
+                        pos[2] += dds[2]
+                        data.pos[2] += 1
+                    pos[1] += dds[1]
+                    data.pos[1] += 1
+                pos[0] += dds[0]
+                data.pos[0] += 1
 
     @cython.boundscheck(False)
     @cython.wraparound(False)

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/setup.py
--- a/yt/geometry/setup.py
+++ b/yt/geometry/setup.py
@@ -8,6 +8,19 @@
     from numpy.distutils.misc_util import Configuration
     config = Configuration('geometry',parent_package,top_path)
     config.add_subpackage('coordinates')
+    config.add_extension("grid_visitors", 
+                ["yt/geometry/grid_visitors.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/grid_visitors.pxd"])
+    config.add_extension("grid_container", 
+                ["yt/geometry/grid_container.pyx"],
+                include_dirs=["yt/utilities/lib/"],
+                libraries=["m"],
+                depends=["yt/utilities/lib/fp_utils.pxd",
+                         "yt/geometry/grid_container.pxd",
+                         "yt/geometry/grid_visitors.pxd"])
     config.add_extension("oct_container", 
                 ["yt/geometry/oct_container.pyx"],
                 include_dirs=["yt/utilities/lib/"],
@@ -35,6 +48,9 @@
                 libraries=["m"],
                 depends=["yt/utilities/lib/fp_utils.pxd",
                          "yt/geometry/oct_container.pxd",
+                         "yt/geometry/oct_visitors.pxd",
+                         "yt/geometry/grid_container.pxd",
+                         "yt/geometry/grid_visitors.pxd",
                          "yt/geometry/selection_routines.pxd"])
     config.add_extension("particle_deposit", 
                 ["yt/geometry/particle_deposit.pyx"],

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/geometry/tests/test_grid_container.py
--- /dev/null
+++ b/yt/geometry/tests/test_grid_container.py
@@ -0,0 +1,118 @@
+"""
+Tests for GridTree
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import numpy as np
+import random
+
+from yt.testing import \
+    assert_equal, assert_raises
+from yt.frontends.stream.api import \
+    load_amr_grids
+
+
+def setup():
+    """Prepare setup specific environment"""
+    global test_ds
+
+    grid_data = [
+        dict(left_edge=[0.0, 0.0, 0.0], right_edge=[1.0, 1.0, 1.],
+             level=0, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.25, 0.25, 0.25], right_edge=[0.75, 0.75, 0.75],
+             level=1, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.25, 0.25, 0.375], right_edge=[0.5, 0.5, 0.625],
+             level=2, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.5, 0.5, 0.375], right_edge=[0.75, 0.75, 0.625],
+             level=2, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.3125, 0.3125, 0.4375],
+             right_edge=[0.4375, 0.4375, 0.5625],
+             level=3, dimensions=[16, 16, 16]),
+        dict(left_edge=[0.5625, 0.5625, 0.4375],
+             right_edge=[0.6875, 0.6875, 0.5625],
+             level=3, dimensions=[16, 16, 16])
+    ]
+
+    for grid in grid_data:
+        grid["density"] = \
+            np.random.random(grid["dimensions"]) * 2 ** grid["level"]
+    test_ds = load_amr_grids(grid_data, [16, 16, 16], 1.0)
+
+
+def test_grid_tree():
+    """Main test suite for GridTree"""
+    grid_tree = test_ds.index._get_grid_tree()
+    indices, levels, nchild, children = grid_tree.return_tree_info()
+
+    grid_levels = [grid.Level for grid in test_ds.index.grids]
+
+    grid_indices = [grid.id - grid._id_offset for grid in test_ds.index.grids]
+    grid_nchild = [len(grid.Children) for grid in test_ds.index.grids]
+
+    yield assert_equal, levels, grid_levels
+    yield assert_equal, indices, grid_indices
+    yield assert_equal, nchild, grid_nchild
+
+    for i, grid in enumerate(test_ds.index.grids):
+        if grid_nchild[i] > 0:
+            grid_children = np.array([child.id - child._id_offset
+                                      for child in grid.Children])
+            yield assert_equal, grid_children, children[i]
+
+def test_find_points():
+    """Main test suite for MatchPoints"""
+    num_points = 100
+    randx = np.random.uniform(low=test_ds.domain_left_edge[0],
+                              high=test_ds.domain_right_edge[0],
+                              size=num_points)
+    randy = np.random.uniform(low=test_ds.domain_left_edge[1],
+                              high=test_ds.domain_right_edge[1],
+                              size=num_points)
+    randz = np.random.uniform(low=test_ds.domain_left_edge[2],
+                              high=test_ds.domain_right_edge[2],
+                              size=num_points)
+
+    point_grids, point_grid_inds = test_ds.index._find_points(randx, randy, randz)
+
+    grid_inds = np.zeros((num_points), dtype='int64')
+
+    for ind, ixx, iyy, izz in zip(range(num_points), randx, randy, randz):
+
+        pos = np.array([ixx, iyy, izz])
+        pt_level = -1
+
+        for grid in test_ds.index.grids:
+
+            if np.all(pos >= grid.LeftEdge) and \
+               np.all(pos <= grid.RightEdge) and \
+               grid.Level > pt_level:
+                pt_level = grid.Level
+                grid_inds[ind] = grid.id - grid._id_offset
+
+    yield assert_equal, point_grid_inds, grid_inds
+
+    # Test whether find_points works for lists
+    point_grids, point_grid_inds = test_ds.index._find_points(randx.tolist(),
+                                                              randy.tolist(),
+                                                              randz.tolist())
+    yield assert_equal, point_grid_inds, grid_inds
+
+    # Test if find_points works for scalar
+    ind = random.randint(0, num_points - 1)
+    point_grids, point_grid_inds = test_ds.index._find_points(randx[ind],
+                                                              randy[ind],
+                                                              randz[ind])
+    yield assert_equal, point_grid_inds, grid_inds[ind]
+
+    # Test that find_points fails properly when the coordinate arrays have unequal sizes
+    yield assert_raises, AssertionError, test_ds.index._find_points, \
+        [0], 1.0, [2, 3]

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/GridTree.pyx
--- a/yt/utilities/lib/GridTree.pyx
+++ /dev/null
@@ -1,267 +0,0 @@
-"""
-Matching points on the grid to specific grids
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-cimport numpy as np
-cimport cython
-
-from libc.stdlib cimport malloc, free
-
-cdef struct GridTreeNode :
-    int num_children
-    int level
-    int index
-    np.float64_t left_edge[3]
-    np.float64_t right_edge[3]
-    GridTreeNode **children
-                
-@cython.boundscheck(False)
-@cython.wraparound(False)
-@cython.cdivision(True)
-cdef GridTreeNode Grid_initialize(np.ndarray[np.float64_t, ndim=1] le,
-                                  np.ndarray[np.float64_t, ndim=1] re,
-                                  int num_children, int level, int index) :
-
-    cdef GridTreeNode node
-    cdef int i
-
-    node.index = index
-    node.level = level
-    for i in range(3) :
-        node.left_edge[i] = le[i]
-        node.right_edge[i] = re[i]
-    node.num_children = num_children
-    
-    if num_children > 0:
-        node.children = <GridTreeNode **> malloc(sizeof(GridTreeNode *) *
-                                                 num_children)
-        for i in range(num_children) :
-            node.children[i] = NULL
-    else :
-        node.children = NULL
-
-    return node
-
-cdef class GridTree :
-
-    cdef GridTreeNode *grids
-    cdef GridTreeNode *root_grids
-    cdef int num_grids
-    cdef int num_root_grids
-    cdef int num_leaf_grids
-    
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def __cinit__(self, int num_grids, 
-                  np.ndarray[np.float64_t, ndim=2] left_edge,
-                  np.ndarray[np.float64_t, ndim=2] right_edge,
-                  np.ndarray[np.int64_t, ndim=1] parent_ind,
-                  np.ndarray[np.int64_t, ndim=1] level,
-                  np.ndarray[np.int64_t, ndim=1] num_children) :
-
-        if parent_ind is None:
-            raise RuntimeError
-        cdef int i, j, k
-        cdef np.ndarray[np.int64_t, ndim=1] child_ptr
-
-        child_ptr = np.zeros(num_grids, dtype='int64')
-
-        self.num_grids = num_grids
-        self.num_root_grids = 0
-        self.num_leaf_grids = 0
-        
-        self.grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
-                                             num_grids)
-                
-        for i in range(num_grids) :
-
-            self.grids[i] = Grid_initialize(left_edge[i,:],
-                                            right_edge[i,:],
-                                            num_children[i],
-                                            level[i], i)
-            if level[i] == 0 :
-                self.num_root_grids += 1
-
-            if num_children[i] == 0 : self.num_leaf_grids += 1
-
-        self.root_grids = <GridTreeNode *> malloc(sizeof(GridTreeNode) *
-                                                  self.num_root_grids)
-                
-        k = 0
-        
-        for i in range(num_grids) :
-
-            j = parent_ind[i]
-            
-            if j >= 0:
-                
-                self.grids[j].children[child_ptr[j]] = &self.grids[i]
-
-                child_ptr[j] += 1
-
-            else :
-
-                if k >= self.num_root_grids:
-                    raise RuntimeError
-                self.root_grids[k] = self.grids[i] 
-                
-                k = k + 1
-    
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def return_tree_info(self) :
-
-        cdef int i, j
-        
-        levels = []
-        indices = []
-        nchild = []
-        children = []
-        
-        for i in range(self.num_grids) : 
-
-            childs = []
-            
-            levels.append(self.grids[i].level)
-            indices.append(self.grids[i].index)
-            nchild.append(self.grids[i].num_children)
-            for j in range(self.grids[i].num_children) :
-                childs.append(self.grids[i].children[j].index)
-            children.append(childs)
-
-        return indices, levels, nchild, children
-    
-cdef class MatchPointsToGrids :
-
-    cdef int num_points
-    cdef np.float64_t * xp
-    cdef np.float64_t * yp
-    cdef np.float64_t * zp
-    cdef GridTree tree
-    cdef np.int64_t * point_grids
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    def __cinit__(self, GridTree tree,
-                  int num_points, 
-                  np.ndarray[np.float64_t, ndim=1] x,
-                  np.ndarray[np.float64_t, ndim=1] y,
-                  np.ndarray[np.float64_t, ndim=1] z) :
-
-        cdef int i
-        
-        self.num_points = num_points
-
-        self.xp = <np.float64_t *> malloc(sizeof(np.float64_t) *
-                                          num_points)
-        self.yp = <np.float64_t *> malloc(sizeof(np.float64_t) *
-                                          num_points)
-        self.zp = <np.float64_t *> malloc(sizeof(np.float64_t) *
-                                          num_points)
-        self.point_grids = <np.int64_t *> malloc(sizeof(np.int64_t) *
-                                              num_points)
-        
-        for i in range(num_points) :
-            self.xp[i] = x[i]
-            self.yp[i] = y[i]
-            self.zp[i] = z[i]
-            self.point_grids[i] = -1
-            
-        self.tree = tree
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    def find_points_in_tree(self) :
-
-        cdef np.ndarray[np.int64_t, ndim=1] pt_grids
-        cdef int i, j
-        cdef np.uint8_t in_grid
-        
-        pt_grids = np.zeros(self.num_points, dtype='int64')
-
-        for i in range(self.num_points) :
-
-            in_grid = 0
-            
-            for j in range(self.tree.num_root_grids) :
-
-                if not in_grid : 
-                    in_grid = self.check_position(i, self.xp[i], self.yp[i], self.zp[i],
-                                                  &self.tree.root_grids[j])
-
-        for i in range(self.num_points) :
-            pt_grids[i] = self.point_grids[i]
-        
-        return pt_grids
-
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    cdef np.uint8_t check_position(self,
-                                   np.int64_t pt_index, 
-                                   np.float64_t x,
-                                   np.float64_t y,
-                                   np.float64_t z,
-                                   GridTreeNode * grid) :
-
-        cdef int i
-        cdef np.uint8_t in_grid
-	
-        in_grid = self.is_in_grid(x, y, z, grid)
-
-        if in_grid :
-
-            if grid.num_children > 0 :
-
-                in_grid = 0
-                
-                for i in range(grid.num_children) :
-
-                    if not in_grid :
-
-                        in_grid = self.check_position(pt_index, x, y, z, grid.children[i])
-
-                if not in_grid :
-                    self.point_grids[pt_index] = grid.index
-                    in_grid = 1
-                    
-            else :
-
-                self.point_grids[pt_index] = grid.index
-                in_grid = 1
-                
-        return in_grid
-    
-    @cython.boundscheck(False)
-    @cython.wraparound(False)
-    @cython.cdivision(True)
-    cdef np.uint8_t is_in_grid(self,
-			 np.float64_t x,
-			 np.float64_t y,
-			 np.float64_t z,
-			 GridTreeNode * grid) :
-
-        cdef np.uint8_t xcond, ycond, zcond, cond
-            
-        xcond = x >= grid.left_edge[0] and x < grid.right_edge[0]
-        ycond = y >= grid.left_edge[1] and y < grid.right_edge[1]
-        zcond = z >= grid.left_edge[2] and z < grid.right_edge[2]
-	
-        cond = xcond and ycond
-        cond = cond and zcond
-
-        return cond
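
For context, the removed MatchPointsToGrids.check_position above walks the grid tree recursively, assigning each point to the finest grid that contains it: children are tried first, and a grid claims the point only when none of its children do. A minimal pure-Python sketch of that descent (the GridNode container and field names here are illustrative, not the actual Cython structs):

    from collections import namedtuple

    GridNode = namedtuple("GridNode", ["index", "left_edge", "right_edge", "children"])

    def finest_grid_index(node, pos):
        """Return the index of the finest grid containing pos, or None."""
        # Half-open containment test, as in the removed is_in_grid.
        if not all(node.left_edge[i] <= pos[i] < node.right_edge[i]
                   for i in range(3)):
            return None
        for child in node.children:       # prefer the most refined match
            idx = finest_grid_index(child, pos)
            if idx is not None:
                return idx
        return node.index                 # no child holds pos: this grid wins

    root = GridNode(0, (0.0, 0.0, 0.0), (1.0, 1.0, 1.0),
                    [GridNode(1, (0.25, 0.25, 0.25), (0.75, 0.75, 0.75), [])])
    assert finest_grid_index(root, (0.5, 0.5, 0.5)) == 1
    assert finest_grid_index(root, (0.1, 0.1, 0.1)) == 0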

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/api.py
--- a/yt/utilities/lib/api.py
+++ b/yt/utilities/lib/api.py
@@ -26,7 +26,6 @@
 from .QuadTree import *
 from .RayIntegrators import *
 from .marching_cubes import *
-from .GridTree import *
 from .write_array import *
 from .mesh_utilities import *
 from .ContourFinding import *

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/bitarray.pxd
--- /dev/null
+++ b/yt/utilities/lib/bitarray.pxd
@@ -0,0 +1,39 @@
+"""
+Bit array functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef inline void ba_set_value(np.uint8_t *buf, np.uint64_t ind,
+                              np.uint8_t val) nogil:
+    if val > 0: val = 1
+    buf[ind >> 3] |= (val << (ind & 7))
+
+cdef inline np.uint8_t ba_get_value(np.uint8_t *buf, np.uint64_t ind) nogil:
+    cdef np.uint8_t rv = (buf[ind >> 3] & (1 << (ind & 7)))
+    if rv == 0: return 0
+    return 1
+
+cdef class bitarray:
+    cdef np.uint8_t *buf
+    cdef np.uint64_t size
+    cdef np.uint64_t buf_size # Size in bytes, not bits, so not the same as size
+    cdef public object ibuf
+
+    cdef void _set_value(self, np.uint64_t ind, np.uint8_t val)
+    cdef np.uint8_t _query_value(self, np.uint64_t ind)
+    #cdef void set_range(self, np.uint64_t ind, np.uint64_t count, int val)
+    #cdef int query_range(self, np.uint64_t ind, np.uint64_t count, int *val)
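
The addressing in ba_set_value/ba_get_value is plain bit packing: ind >> 3 picks the byte and ind & 7 picks the bit within it. A quick pure-Python sketch of the same arithmetic (note that, like the inline setter above, this only turns bits on; it never clears them):

    buf = bytearray(2)                        # room for 16 bits

    def ba_set(buf, ind, val):
        if val:
            buf[ind >> 3] |= 1 << (ind & 7)   # byte = ind // 8, bit = ind % 8

    def ba_get(buf, ind):
        return (buf[ind >> 3] >> (ind & 7)) & 1

    ba_set(buf, 10, 1)                        # bit 2 of byte 1
    assert ba_get(buf, 10) == 1
    assert ba_get(buf, 9) == 0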

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/bitarray.pyx
--- /dev/null
+++ b/yt/utilities/lib/bitarray.pyx
@@ -0,0 +1,169 @@
+"""
+Bit array functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2015, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free
+
+cdef class bitarray:
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def __init__(self, size = -1, arr = None):
+        r"""This is a bitarray, which flips individual bits to on/off inside a
+        uint8 container array.
+
+        By encoding on/off inside each bit in a uint8 array, we can compress
+        boolean information down by up to a factor of 8.  Either an input array
+        or a size must be provided.
+
+        Parameters
+        ----------
+        size : int
+            The size we should pre-allocate.
+        arr : array-like
+            An input array to turn into a bitarray.
+
+        Examples
+        --------
+
+        >>> arr_in1 = np.array([True, True, False])
+        >>> arr_in2 = np.array([False, True, True])
+        >>> a = ba.bitarray(arr = arr_in1)
+        >>> b = ba.bitarray(arr = arr_in2)
+        >>> print a.ibuf & b.ibuf
+        >>> print ba.bitarray(arr = arr_in1 & arr_in2).as_bool_array()
+
+        """
+        cdef np.uint64_t i
+        if size == -1 and arr is None:
+            raise RuntimeError
+        elif size == -1:
+            size = arr.size
+        elif size != -1 and arr is not None:
+            if size != arr.size:
+                raise RuntimeError
+        self.buf_size = (size >> 3)
+        if (size & 7) != 0:
+            # We need an extra one if we've got any lingering bits
+            self.buf_size += 1
+        cdef np.ndarray[np.uint8_t] ibuf_t
+        ibuf_t = self.ibuf = np.zeros(self.buf_size, "uint8")
+        self.buf = <np.uint8_t *> ibuf_t.data
+        self.size = size
+        if arr is not None:
+            self.set_from_array(arr)
+        else:
+            for i in range(self.buf_size):
+                self.buf[i] = 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def set_from_array(self, np.ndarray[np.uint8_t, cast=True] arr):
+        r"""Given an array that is either uint8_t or boolean, set the values of
+        this array to match it.
+
+        Parameters
+        ----------
+        arr : array, castable to uint8
+            The array we set from.
+        """
+        cdef np.uint64_t i, j, elem
+        cdef np.uint8_t *btemp = self.buf
+        arr = np.ascontiguousarray(arr)
+        j = 0
+        for i in range(self.size):
+            btemp[i >> 3] = btemp[i >> 3] | (arr[i] << (j))
+            j += 1
+            if j == 8:
+                j = 0
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def as_bool_array(self):
+        r"""Return a copy of this array, as a boolean array.
+
+        All of the values encoded in this bitarray are expanded into boolean
+        values in a new array and returned.
+
+        Returns
+        -------
+        arr : numpy array of type bool
+            The packed bits expanded into boolean values
+
+        """
+        cdef np.uint64_t i, j, elem
+        cdef np.uint8_t *btemp = self.buf
+        cdef np.ndarray[np.uint8_t, ndim=1] output
+        output = np.zeros(self.size, "uint8")
+        j = 0
+        for i in range(self.size):
+            output[i] = (btemp[i >> 3] >> (j)) & 1
+            j += 1
+            if j == 8:
+                j = 0
+        return output.astype("bool")
+
+    cdef void _set_value(self, np.uint64_t ind, np.uint8_t val):
+        ba_set_value(self.buf, ind, val)
+
+    def set_value(self, np.uint64_t ind, np.uint8_t val):
+        r"""Set the on/off value of a given bit.
+
+        Modify the value encoded in a given index.
+
+        Parameters
+        ----------
+        ind : int
+            The index to query in the bitarray.
+        val : bool or uint8_t
+            What to set the index to
+
+        Examples
+        --------
+
+        >>> arr_in = np.array([True, True, False])
+        >>> a = ba.bitarray(arr = arr_in)
+        >>> a.set_value(2, 1)
+
+        """
+        ba_set_value(self.buf, ind, val)
+
+    cdef np.uint8_t _query_value(self, np.uint64_t ind):
+        return ba_get_value(self.buf, ind)
+
+    def query_value(self, np.uint64_t ind):
+        r"""Query the on/off value of a given bit.
+
+        Return the value encoded in a given index.
+
+        Parameters
+        ----------
+        ind : int
+            The index to query in the bitarray.
+
+        Examples
+        --------
+
+        >>> arr_in = np.array([True, True, False])
+        >>> a = ba.bitarray(arr = arr_in)
+        >>> print a.query_value(2)
+
+        """
+        return ba_get_value(self.buf, ind)
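
Taken together, the class round-trips boolean arrays at one bit per element. A minimal usage sketch, assuming the extension builds and imports as yt.utilities.lib.bitarray (as registered in setup.py below):

    import numpy as np
    import yt.utilities.lib.bitarray as ba

    arr_in = np.random.random(1000) > 0.5   # 1000 booleans
    b = ba.bitarray(arr=arr_in)
    print(b.ibuf.size)                      # 125 bytes: 1000 bits / 8
    arr_out = b.as_bool_array()
    assert (arr_in == arr_out).all()

    b.set_value(3, 1)                       # turn a single bit on
    assert b.query_value(3) == 1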

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -54,6 +54,9 @@
     # always properly checked its header files (see
     # https://bugzilla.redhat.com/show_bug.cgi?id=494579 ) we simply disable
     # support for setjmp.
+    config.add_extension("bitarray", 
+                ["yt/utilities/lib/bitarray.pyx"],
+                libraries=["m"], depends=["yt/utilities/lib/bitarray.pxd"])
     config.add_extension("CICDeposit", 
                 ["yt/utilities/lib/CICDeposit.pyx"],
                 libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
@@ -143,9 +146,6 @@
                          ["yt/utilities/lib/write_array.pyx"])
     config.add_extension("ragged_arrays",
                          ["yt/utilities/lib/ragged_arrays.pyx"])
-    config.add_extension("GridTree", 
-    ["yt/utilities/lib/GridTree.pyx"],
-        libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
     config.add_extension("amr_kdtools", 
                          ["yt/utilities/lib/amr_kdtools.pyx"],
                          libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/tests/test_bitarray.py
--- /dev/null
+++ b/yt/utilities/lib/tests/test_bitarray.py
@@ -0,0 +1,41 @@
+import yt.utilities.lib.bitarray as ba
+import numpy as np
+from yt.testing import *
+
+def test_inout_bitarray():
+    # Check that we can do it for bitarrays that are funny-shaped
+    for i in range(7):
+        # Check we can feed in an array
+        arr_in = (np.random.random(32**3 + i) > 0.5)
+        b = ba.bitarray(arr = arr_in)
+        if i > 0:
+            yield assert_equal, b.ibuf.size, (32**3)/8.0 + 1
+        arr_out = b.as_bool_array()
+        yield assert_equal, arr_in, arr_out
+
+        # Let's check we can do it without feeding it at first
+        b = ba.bitarray(size = arr_in.size)
+        b.set_from_array(arr_in)
+        arr_out = b.as_bool_array()
+        yield assert_equal, arr_in, arr_out
+
+    # Try a big array
+    arr_in = (np.random.random(32**3 + i) > 0.5)
+    b = ba.bitarray(arr = arr_in)
+    arr_out = b.as_bool_array()
+    yield assert_equal, arr_in, arr_out
+
+    # Let's check we can do something interesting.
+    arr_in1 = (np.random.random(32**3) > 0.5)
+    arr_in2 = (np.random.random(32**3) > 0.5)
+    b1 = ba.bitarray(arr = arr_in1)
+    b2 = ba.bitarray(arr = arr_in2)
+    b3 = ba.bitarray(arr = (arr_in1 & arr_in2))
+    yield assert_equal, (b1.ibuf & b2.ibuf), b3.ibuf
+
+    b = ba.bitarray(10)
+    for i in range(10):
+        b.set_value(i, 2) # 2 should evaluate to True
+        arr = b.as_bool_array()
+        yield assert_equal, arr[:i+1].all(), True
+        yield assert_equal, arr[i+1:].any(), False
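
The ibuf.size assertion near the top of the test follows from the allocation rule in __init__: n bits occupy n >> 3 whole bytes, plus one padding byte whenever n is not a multiple of 8. A quick check of that arithmetic for the sizes the test uses:

    def expected_buf_size(n_bits):
        # Mirrors bitarray.__init__: whole bytes plus one for leftover bits.
        return (n_bits >> 3) + (1 if n_bits & 7 else 0)

    assert expected_buf_size(32**3) == 4096
    assert expected_buf_size(32**3 + 3) == 4097   # the (32**3)/8.0 + 1 in the test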

diff -r 3ecc1bff8a67aa8587ef41758a2569583d14bd3b -r b2c60e683290d14b189ca173bd874df95ee074bf yt/utilities/lib/tests/test_grid_tree.py
--- a/yt/utilities/lib/tests/test_grid_tree.py
+++ /dev/null
@@ -1,118 +0,0 @@
-"""
-Tests for GridTree
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import numpy as np
-import random
-
-from yt.testing import \
-    assert_equal, assert_raises
-from yt.frontends.stream.api import \
-    load_amr_grids
-
-
-def setup():
-    """Prepare setup specific environment"""
-    global test_ds
-
-    grid_data = [
-        dict(left_edge=[0.0, 0.0, 0.0], right_edge=[1.0, 1.0, 1.],
-             level=0, dimensions=[16, 16, 16]),
-        dict(left_edge=[0.25, 0.25, 0.25], right_edge=[0.75, 0.75, 0.75],
-             level=1, dimensions=[16, 16, 16]),
-        dict(left_edge=[0.25, 0.25, 0.375], right_edge=[0.5, 0.5, 0.625],
-             level=2, dimensions=[16, 16, 16]),
-        dict(left_edge=[0.5, 0.5, 0.375], right_edge=[0.75, 0.75, 0.625],
-             level=2, dimensions=[16, 16, 16]),
-        dict(left_edge=[0.3125, 0.3125, 0.4375],
-             right_edge=[0.4375, 0.4375, 0.5625],
-             level=3, dimensions=[16, 16, 16]),
-        dict(left_edge=[0.5625, 0.5625, 0.4375],
-             right_edge=[0.6875, 0.6875, 0.5625],
-             level=3, dimensions=[16, 16, 16])
-    ]
-
-    for grid in grid_data:
-        grid["density"] = \
-            np.random.random(grid["dimensions"]) * 2 ** grid["level"]
-    test_ds = load_amr_grids(grid_data, [16, 16, 16], 1.0)
-
-
-def test_grid_tree():
-    """Main test suite for GridTree"""
-    grid_tree = test_ds.index._get_grid_tree()
-    indices, levels, nchild, children = grid_tree.return_tree_info()
-
-    grid_levels = [grid.Level for grid in test_ds.index.grids]
-
-    grid_indices = [grid.id - grid._id_offset for grid in test_ds.index.grids]
-    grid_nchild = [len(grid.Children) for grid in test_ds.index.grids]
-
-    yield assert_equal, levels, grid_levels
-    yield assert_equal, indices, grid_indices
-    yield assert_equal, nchild, grid_nchild
-
-    for i, grid in enumerate(test_ds.index.grids):
-        if grid_nchild[i] > 0:
-            grid_children = np.array([child.id - child._id_offset
-                                      for child in grid.Children])
-            yield assert_equal, grid_children, children[i]
-
-def test_find_points():
-    """Main test suite for MatchPoints"""
-    num_points = 100
-    randx = np.random.uniform(low=test_ds.domain_left_edge[0],
-                              high=test_ds.domain_right_edge[0],
-                              size=num_points)
-    randy = np.random.uniform(low=test_ds.domain_left_edge[1],
-                              high=test_ds.domain_right_edge[1],
-                              size=num_points)
-    randz = np.random.uniform(low=test_ds.domain_left_edge[2],
-                              high=test_ds.domain_right_edge[2],
-                              size=num_points)
-
-    point_grids, point_grid_inds = test_ds.index._find_points(randx, randy, randz)
-
-    grid_inds = np.zeros((num_points), dtype='int64')
-
-    for ind, ixx, iyy, izz in zip(range(num_points), randx, randy, randz):
-
-        pos = np.array([ixx, iyy, izz])
-        pt_level = -1
-
-        for grid in test_ds.index.grids:
-
-            if np.all(pos >= grid.LeftEdge) and \
-               np.all(pos <= grid.RightEdge) and \
-               grid.Level > pt_level:
-                pt_level = grid.Level
-                grid_inds[ind] = grid.id - grid._id_offset
-
-    yield assert_equal, point_grid_inds, grid_inds
-
-    # Test wheter find_points works for lists
-    point_grids, point_grid_inds = test_ds.index._find_points(randx.tolist(),
-                                                              randy.tolist(),
-                                                              randz.tolist())
-    yield assert_equal, point_grid_inds, grid_inds
-
-    # Test if find_points works for scalar
-    ind = random.randint(0, num_points - 1)
-    point_grids, point_grid_inds = test_ds.index._find_points(randx[ind],
-                                                              randy[ind],
-                                                              randz[ind])
-    yield assert_equal, point_grid_inds, grid_inds[ind]
-
-    # Test if find_points fails properly for non equal indices' array sizes
-    yield assert_raises, AssertionError, test_ds.index._find_points, \
-        [0], 1.0, [2, 3]

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/529ab2935c34/
Changeset:   529ab2935c34
Branch:      yt
User:        chummels
Date:        2015-06-06 00:34:26+00:00
Summary:     Merging.
Affected #:  153 files

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -13,6 +13,7 @@
 yt/frontends/ramses/_ramses_reader.cpp
 yt/geometry/fake_octree.c
 yt/geometry/grid_container.c
+yt/geometry/grid_visitors.c
 yt/geometry/oct_container.c
 yt/geometry/oct_visitors.c
 yt/geometry/particle_deposit.c
@@ -25,6 +26,7 @@
 yt/utilities/spatial/ckdtree.c
 yt/utilities/lib/alt_ray_tracers.c
 yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/bitarray.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -39,6 +41,7 @@
 yt/utilities/lib/misc_utilities.c
 yt/utilities/lib/Octree.c
 yt/utilities/lib/origami.c
+yt/utilities/lib/pixelization_routines.c
 yt/utilities/lib/png_writer.c
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
@@ -59,3 +62,4 @@
 doc/source/reference/api/generated/*
 doc/_temp/*
 doc/source/bootcamp/.ipynb_checkpoints/
+dist

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e .python-version
--- /dev/null
+++ b/.python-version
@@ -0,0 +1,1 @@
+2.7.9

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e README
--- a/README
+++ b/README
@@ -20,4 +20,4 @@
 For more information on installation, what to do if you run into problems, or 
 ways to help development, please visit our website.
 
-Enjoy!
+Enjoy!
\ No newline at end of file

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e distribute_setup.py
--- a/distribute_setup.py
+++ /dev/null
@@ -1,541 +0,0 @@
-#!python
-"""Bootstrap distribute installation
-
-If you want to use setuptools in your package's setup.py, just include this
-file in the same directory with it, and add this to the top of your setup.py::
-
-    from distribute_setup import use_setuptools
-    use_setuptools()
-
-If you want to require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, you can do so by supplying
-the appropriate options to ``use_setuptools()``.
-
-This file can also be run as a script to install or upgrade setuptools.
-"""
-import os
-import shutil
-import sys
-import time
-import fnmatch
-import tempfile
-import tarfile
-import optparse
-
-from distutils import log
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-try:
-    import subprocess
-
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        return subprocess.call(args) == 0
-
-except ImportError:
-    # will be used for python 2.3
-    def _python_cmd(*args):
-        args = (sys.executable,) + args
-        # quoting arguments if windows
-        if sys.platform == 'win32':
-            def quote(arg):
-                if ' ' in arg:
-                    return '"%s"' % arg
-                return arg
-            args = [quote(arg) for arg in args]
-        return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-
-DEFAULT_VERSION = "0.6.32"
-DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
-SETUPTOOLS_FAKED_VERSION = "0.6c11"
-
-SETUPTOOLS_PKG_INFO = """\
-Metadata-Version: 1.0
-Name: setuptools
-Version: %s
-Summary: xxxx
-Home-page: xxx
-Author: xxx
-Author-email: xxx
-License: xxx
-Description: xxx
-""" % SETUPTOOLS_FAKED_VERSION
-
-
-def _install(tarball, install_args=()):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # installing
-        log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _build_egg(egg, tarball, to_dir):
-    # extracting the tarball
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        tar = tarfile.open(tarball)
-        _extractall(tar)
-        tar.close()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-
-        # building an egg
-        log.warn('Building a Distribute egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    egg = os.path.join(to_dir, 'distribute-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        tarball = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, tarball, to_dir)
-    sys.path.insert(0, egg)
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                   to_dir=os.curdir, download_delay=15, no_fake=True):
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    was_imported = 'pkg_resources' in sys.modules or \
-        'setuptools' in sys.modules
-    try:
-        try:
-            import pkg_resources
-            if not hasattr(pkg_resources, '_distribute'):
-                if not no_fake:
-                    _fake_setuptools()
-                raise ImportError
-        except ImportError:
-            return _do_download(version, download_base, to_dir, download_delay)
-        try:
-            pkg_resources.require("distribute>=" + version)
-            return
-        except pkg_resources.VersionConflict:
-            e = sys.exc_info()[1]
-            if was_imported:
-                sys.stderr.write(
-                "The required version of distribute (>=%s) is not available,\n"
-                "and can't be installed while this script is running. Please\n"
-                "install a more recent version first, using\n"
-                "'easy_install -U distribute'."
-                "\n\n(Currently using %r)\n" % (version, e.args[0]))
-                sys.exit(2)
-            else:
-                del pkg_resources, sys.modules['pkg_resources']    # reload ok
-                return _do_download(version, download_base, to_dir,
-                                    download_delay)
-        except pkg_resources.DistributionNotFound:
-            return _do_download(version, download_base, to_dir,
-                                download_delay)
-    finally:
-        if not no_fake:
-            _create_fake_setuptools_pkg_info(to_dir)
-
-
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-                        to_dir=os.curdir, delay=15):
-    """Download distribute from a specified location and return its filename
-
-    `version` should be a valid distribute version number that is available
-    as an egg for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    tgz_name = "distribute-%s.tar.gz" % version
-    url = download_base + tgz_name
-    saveto = os.path.join(to_dir, tgz_name)
-    src = dst = None
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        try:
-            log.warn("Downloading %s", url)
-            src = urlopen(url)
-            # Read/write all in one block, so we don't create a corrupt file
-            # if the download is interrupted.
-            data = src.read()
-            dst = open(saveto, "wb")
-            dst.write(data)
-        finally:
-            if src:
-                src.close()
-            if dst:
-                dst.close()
-    return os.path.realpath(saveto)
-
-
-def _no_sandbox(function):
-    def __no_sandbox(*args, **kw):
-        try:
-            from setuptools.sandbox import DirectorySandbox
-            if not hasattr(DirectorySandbox, '_old'):
-                def violation(*args):
-                    pass
-                DirectorySandbox._old = DirectorySandbox._violation
-                DirectorySandbox._violation = violation
-                patched = True
-            else:
-                patched = False
-        except ImportError:
-            patched = False
-
-        try:
-            return function(*args, **kw)
-        finally:
-            if patched:
-                DirectorySandbox._violation = DirectorySandbox._old
-                del DirectorySandbox._old
-
-    return __no_sandbox
-
-
-def _patch_file(path, content):
-    """Will backup the file then patch it"""
-    existing_content = open(path).read()
-    if existing_content == content:
-        # already patched
-        log.warn('Already patched.')
-        return False
-    log.warn('Patching...')
-    _rename_path(path)
-    f = open(path, 'w')
-    try:
-        f.write(content)
-    finally:
-        f.close()
-    return True
-
-_patch_file = _no_sandbox(_patch_file)
-
-
-def _same_content(path, content):
-    return open(path).read() == content
-
-
-def _rename_path(path):
-    new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s to %s', path, new_name)
-    os.rename(path, new_name)
-    return new_name
-
-
-def _remove_flat_installation(placeholder):
-    if not os.path.isdir(placeholder):
-        log.warn('Unknown installation at %s', placeholder)
-        return False
-    found = False
-    for file in os.listdir(placeholder):
-        if fnmatch.fnmatch(file, 'setuptools*.egg-info'):
-            found = True
-            break
-    if not found:
-        log.warn('Could not locate setuptools*.egg-info')
-        return
-
-    log.warn('Moving elements out of the way...')
-    pkg_info = os.path.join(placeholder, file)
-    if os.path.isdir(pkg_info):
-        patched = _patch_egg_dir(pkg_info)
-    else:
-        patched = _patch_file(pkg_info, SETUPTOOLS_PKG_INFO)
-
-    if not patched:
-        log.warn('%s already patched.', pkg_info)
-        return False
-    # now let's move the files out of the way
-    for element in ('setuptools', 'pkg_resources.py', 'site.py'):
-        element = os.path.join(placeholder, element)
-        if os.path.exists(element):
-            _rename_path(element)
-        else:
-            log.warn('Could not find the %s element of the '
-                     'Setuptools distribution', element)
-    return True
-
-_remove_flat_installation = _no_sandbox(_remove_flat_installation)
-
-
-def _after_install(dist):
-    log.warn('After install bootstrap.')
-    placeholder = dist.get_command_obj('install').install_purelib
-    _create_fake_setuptools_pkg_info(placeholder)
-
-
-def _create_fake_setuptools_pkg_info(placeholder):
-    if not placeholder or not os.path.exists(placeholder):
-        log.warn('Could not find the install location')
-        return
-    pyver = '%s.%s' % (sys.version_info[0], sys.version_info[1])
-    setuptools_file = 'setuptools-%s-py%s.egg-info' % \
-            (SETUPTOOLS_FAKED_VERSION, pyver)
-    pkg_info = os.path.join(placeholder, setuptools_file)
-    if os.path.exists(pkg_info):
-        log.warn('%s already exists', pkg_info)
-        return
-
-    log.warn('Creating %s', pkg_info)
-    try:
-        f = open(pkg_info, 'w')
-    except EnvironmentError:
-        log.warn("Don't have permissions to write %s, skipping", pkg_info)
-        return
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-
-    pth_file = os.path.join(placeholder, 'setuptools.pth')
-    log.warn('Creating %s', pth_file)
-    f = open(pth_file, 'w')
-    try:
-        f.write(os.path.join(os.curdir, setuptools_file))
-    finally:
-        f.close()
-
-_create_fake_setuptools_pkg_info = _no_sandbox(
-    _create_fake_setuptools_pkg_info
-)
-
-
-def _patch_egg_dir(path):
-    # let's check if it's already patched
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    if os.path.exists(pkg_info):
-        if _same_content(pkg_info, SETUPTOOLS_PKG_INFO):
-            log.warn('%s already patched.', pkg_info)
-            return False
-    _rename_path(path)
-    os.mkdir(path)
-    os.mkdir(os.path.join(path, 'EGG-INFO'))
-    pkg_info = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
-    f = open(pkg_info, 'w')
-    try:
-        f.write(SETUPTOOLS_PKG_INFO)
-    finally:
-        f.close()
-    return True
-
-_patch_egg_dir = _no_sandbox(_patch_egg_dir)
-
-
-def _before_install():
-    log.warn('Before install bootstrap.')
-    _fake_setuptools()
-
-
-def _under_prefix(location):
-    if 'install' not in sys.argv:
-        return True
-    args = sys.argv[sys.argv.index('install') + 1:]
-    for index, arg in enumerate(args):
-        for option in ('--root', '--prefix'):
-            if arg.startswith('%s=' % option):
-                top_dir = arg.split('root=')[-1]
-                return location.startswith(top_dir)
-            elif arg == option:
-                if len(args) > index:
-                    top_dir = args[index + 1]
-                    return location.startswith(top_dir)
-        if arg == '--user' and USER_SITE is not None:
-            return location.startswith(USER_SITE)
-    return True
-
-
-def _fake_setuptools():
-    log.warn('Scanning installed packages')
-    try:
-        import pkg_resources
-    except ImportError:
-        # we're cool
-        log.warn('Setuptools or Distribute does not seem to be installed.')
-        return
-    ws = pkg_resources.working_set
-    try:
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools', replacement=False)
-            )
-    except TypeError:
-        # old distribute API
-        setuptools_dist = ws.find(
-            pkg_resources.Requirement.parse('setuptools')
-        )
-
-    if setuptools_dist is None:
-        log.warn('No setuptools distribution found')
-        return
-    # detecting if it was already faked
-    setuptools_location = setuptools_dist.location
-    log.warn('Setuptools installation detected at %s', setuptools_location)
-
-    # if --root or --preix was provided, and if
-    # setuptools is not located in them, we don't patch it
-    if not _under_prefix(setuptools_location):
-        log.warn('Not patching, --root or --prefix is installing Distribute'
-                 ' in another location')
-        return
-
-    # let's see if its an egg
-    if not setuptools_location.endswith('.egg'):
-        log.warn('Non-egg installation')
-        res = _remove_flat_installation(setuptools_location)
-        if not res:
-            return
-    else:
-        log.warn('Egg installation')
-        pkg_info = os.path.join(setuptools_location, 'EGG-INFO', 'PKG-INFO')
-        if (os.path.exists(pkg_info) and
-            _same_content(pkg_info, SETUPTOOLS_PKG_INFO)):
-            log.warn('Already patched.')
-            return
-        log.warn('Patching...')
-        # let's create a fake egg replacing setuptools one
-        res = _patch_egg_dir(setuptools_location)
-        if not res:
-            return
-    log.warn('Patching complete.')
-    _relaunch()
-
-
-def _relaunch():
-    log.warn('Relaunching...')
-    # we have to relaunch the process
-    # pip marker to avoid a relaunch bug
-    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
-    _cmd2 = ['-c', 'install', '--record']
-    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
-        sys.argv[0] = 'setup.py'
-    args = [sys.executable] + sys.argv
-    sys.exit(subprocess.call(args))
-
-
-def _extractall(self, path=".", members=None):
-    """Extract all members from the archive to the current working
-       directory and set owner, modification time and permissions on
-       directories afterwards. `path' specifies a different directory
-       to extract to. `members' is optional and must be a subset of the
-       list returned by getmembers().
-    """
-    import copy
-    import operator
-    from tarfile import ExtractError
-    directories = []
-
-    if members is None:
-        members = self
-
-    for tarinfo in members:
-        if tarinfo.isdir():
-            # Extract directories with a safe mode.
-            directories.append(tarinfo)
-            tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448  # decimal for oct 0700
-        self.extract(tarinfo, path)
-
-    # Reverse sort directories.
-    if sys.version_info < (2, 4):
-        def sorter(dir1, dir2):
-            return cmp(dir1.name, dir2.name)
-        directories.sort(sorter)
-        directories.reverse()
-    else:
-        directories.sort(key=operator.attrgetter('name'), reverse=True)
-
-    # Set correct owner, mtime and filemode on directories.
-    for tarinfo in directories:
-        dirpath = os.path.join(path, tarinfo.name)
-        try:
-            self.chown(tarinfo, dirpath)
-            self.utime(tarinfo, dirpath)
-            self.chmod(tarinfo, dirpath)
-        except ExtractError:
-            e = sys.exc_info()[1]
-            if self.errorlevel > 1:
-                raise
-            else:
-                self._dbg(1, "tarfile: %s" % e)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the distribute package
-    """
-    install_args = []
-    if options.user_install:
-        if sys.version_info < (2, 6):
-            log.warn("--user requires Python 2.6 or later")
-            raise SystemExit(1)
-        install_args.append('--user')
-    return install_args
-
-def _parse_args():
-    """
-    Parse the command line for options
-    """
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the distribute package')
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-def main(version=DEFAULT_VERSION):
-    """Install or upgrade setuptools and EasyInstall"""
-    options = _parse_args()
-    tarball = download_setuptools(download_base=options.download_base)
-    return _install(tarball, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/helper_scripts/update_recipes.py
--- a/doc/helper_scripts/update_recipes.py
+++ b/doc/helper_scripts/update_recipes.py
@@ -8,7 +8,7 @@
 
 .. note::
    All of these scripts are located in the mercurial repository at
-   http://hg.yt-project.org/cookbook/
+   http://bitbucket.org/yt_analysis/cookbook/
 
 """
 footer = """ """
@@ -23,7 +23,7 @@
     recipes = cStringIO.StringIO()
 recipes.write(header)
 
-url = "here: http://hg.yt-project.org/cookbook/raw/tip/%s ."
+url = "here: http://bitbucket.org/yt_analysis/cookbook/raw/tip/%s ."
 
 def cond_output(f, v):
     if not v:
@@ -31,7 +31,7 @@
     return True
 
 repo = hg.repository(uii, "../cookbook/")
-commands.pull(uii, repo, "http://hg.yt-project.org/cookbook/")
+commands.pull(uii, repo, "http://bitbucket.org/yt_analysis/cookbook/")
 ctx = repo["tip"]
 for file in ctx:
     if not file.startswith("recipes/"): continue

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -1,18 +1,14 @@
 #
 # Hi there!  Welcome to the yt installation script.
 #
+# First things first, if you experience problems, please visit the Help 
+# section at http://yt-project.org.
+#
 # This script is designed to create a fully isolated Python installation
 # with the dependencies you need to run yt.
 #
-# There are a few options, but you only need to set *one* of them.  And
-# that's the next one, DEST_DIR.  But, if you want to use an existing HDF5
-# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of yt, you can set YT_DIR, too.  (It'll already
-# check the current directory and one up.
-#
-# If you experience problems, please visit the Help section at 
-# http://yt-project.org.
-#
+# There are a few options, but you only need to set *one* of them, which is 
+# the next one, DEST_DIR:
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
@@ -23,16 +19,25 @@
     DEST_DIR=${YT_DEST}
 fi
 
+# What follows are some other options that you may or may not need to change.
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
+# If you've got yt some other place, set this to point to it. The script will
+# already check the current directory and the one above it in the tree.
+YT_DIR=""
+
 # If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
 #NUMPY_ARGS="--fcompiler=fake"
 
+INST_PY3=0      # Install Python 3 along with Python 2. If this is turned
+                # on, all Python packages (including yt) will be installed
+                # in Python 3 (except Mercurial, which requires Python 2).
 INST_HG=1       # Install Mercurial or not?  If hg is not already
                 # installed, yt cannot be installed.
 INST_ZLIB=1     # On some systems (Kraken) matplotlib has issues with
@@ -50,9 +55,6 @@
 INST_ROCKSTAR=0 # Install the Rockstar halo finder?
 INST_SCIPY=0    # Install scipy?
 
-# If you've got yt some other place, set this to point to it.
-YT_DIR=""
-
 # If you need to pass anything to matplotlib, do so here.
 MPL_SUPP_LDFLAGS=""
 MPL_SUPP_CFLAGS=""
@@ -111,6 +113,7 @@
     echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
     echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
     echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE}
     echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
     echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
     echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
@@ -415,6 +418,10 @@
 get_willwont ${INST_SQLITE3}
 echo "be installing SQLite3"
 
+printf "%-15s = %s so I " "INST_PY3" "${INST_PY3}"
+get_willwont ${INST_PY3}
+echo "be installing Python 3"
+
 printf "%-15s = %s so I " "INST_HG" "${INST_HG}"
 get_willwont ${INST_HG}
 echo "be installing Mercurial"
@@ -487,6 +494,13 @@
     exit 1
 }
 
+if [ $INST_PY3 -eq 1 ]
+then
+	 PYTHON_EXEC='python3.4'
+else 
+	 PYTHON_EXEC='python2.7'
+fi
+
 function do_setup_py
 {
     [ -e $1/done ] && return
@@ -501,19 +515,27 @@
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
     BUILD_ARGS=""
+    if [[ $LIB =~ .*mercurial.* ]] 
+    then
+        PYEXE="python2.7"
+    else
+        PYEXE=${PYTHON_EXEC}
+    fi
     case $LIB in
         *h5py*)
-            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            pushd $LIB &> /dev/null
+            ( ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py configure --hdf5=${HDF5_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+            popd &> /dev/null
             ;;
         *numpy*)
-            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            if [ -e ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/numpy/__init__.py ]
             then
-                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                VER=$(${DEST_DIR}/bin/${PYTHON_EXEC} -c 'from distutils.version import StrictVersion as SV; \
                                                  import numpy; print SV(numpy.__version__) < SV("1.8.0")')
                 if [ $VER == "True" ]
                 then
                     echo "Removing previous NumPy instance (see issue #889)"
-                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                    rm -rf ${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/{numpy*,*.pth}
                 fi
             fi
             ;;
@@ -521,8 +543,8 @@
             ;;
     esac
     cd $LIB
-    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    ( ${DEST_DIR}/bin/${PYEXE} setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
 }
@@ -590,60 +612,64 @@
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
 export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages
 
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.20.2'
+PYTHON2='Python-2.7.9'
+PYTHON3='Python-3.4.3'
+CYTHON='Cython-0.22'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
-FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.3.1'
-HDF5='hdf5-1.8.14'
-IPYTHON='ipython-2.2.0'
+FREETYPE_VER='freetype-2.4.12' 
+H5PY='h5py-2.5.0'
+HDF5='hdf5-1.8.14' 
+IPYTHON='ipython-2.4.1'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.4.0'
-MERCURIAL='mercurial-3.1'
-NOSE='nose-1.3.4'
-NUMPY='numpy-1.8.2'
-PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-14.3.1'
+MATPLOTLIB='matplotlib-1.4.3'
+MERCURIAL='mercurial-3.4'
+NOSE='nose-1.3.6'
+NUMPY='numpy-1.9.2'
+PYTHON_HGLIB='python-hglib-1.6'
+PYZMQ='pyzmq-14.5.0'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.14.0'
+SCIPY='scipy-0.15.1'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.5'
-TORNADO='tornado-4.0.1'
-ZEROMQ='zeromq-4.0.4'
+SYMPY='sympy-0.7.6'
+TORNADO='tornado-4.0.2'
+ZEROMQ='zeromq-4.0.5'
 ZLIB='zlib-1.2.8'
+SETUPTOOLS='setuptools-16.0'
 
 # Now we dump all our SHA512 files out.
-echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
+echo '856220fa579e272ac38dcef091760f527431ff3b98df9af6e68416fcf77d9659ac5abe5c7dee41331f359614637a4ff452033085335ee499830ed126ab584267  Cython-0.22.tar.gz' > Cython-0.22.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
+echo 'a42f28ed8e49f04cf89e2ea7434c5ecbc264e7188dcb79ab97f745adf664dd9ab57f9a913543731635f90859536244ac37dca9adf0fc2aa1b215ba884839d160  Python-2.7.9.tgz' > Python-2.7.9.tgz.sha512
+echo '609cc82586fabecb25f25ecb410f2938e01d21cde85dd3f8824fe55c6edde9ecf3b7609195473d3fa05a16b9b121464f5414db1a0187103b78ea6edfa71684a7  Python-3.4.3.tgz' > Python-3.4.3.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
+echo '4a83f9ae1855a7fad90133b327d426201c8ccfd2e7fbe9f39b2d61a2eee2f3ebe2ea02cf80f3d4e1ad659f8e790c173df8cc99b87d0b7ce63d34aa88cfdc7939  h5py-2.5.0.tar.gz' > h5py-2.5.0.tar.gz.sha512
 echo '4073fba510ccadaba41db0939f909613c9cb52ba8fb6c1062fc9118edc601394c75e102310be1af4077d07c9b327e6bbb1a6359939a7268dc140382d0c1e0199  hdf5-1.8.14.tar.gz' > hdf5-1.8.14.tar.gz.sha512
-echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
+echo 'a9cffc08ba10c47b0371b05664e55eee0562a30ef0d4bbafae79e52e5b9727906c45840c0918122c06c5672ac65e6eb381399f103e1a836aca003eda81b2acde  ipython-2.4.1.tar.gz' > ipython-2.4.1.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
-echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
-echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
-echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
-echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
-echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
+echo '51b0f58b2618b47b653e17e4f6b6a1215d3a3b0f1331ce3555cc7435e365d9c75693f289ce12fe3bf8f69fd57b663e545f0f1c2c94e81eaa661cac0689e125f5  matplotlib-1.4.3.tar.gz' > matplotlib-1.4.3.tar.gz.sha512
+echo 'a61b0d4cf528136991243bb23ac972c11c50ab5681d09f8b2d12cf7d37d3a9d76262f7fe6e7a1834bf6d03e8dc0ebbd9231da982e049e09830341dabefe5d064  mercurial-3.4.tar.gz' > mercurial-3.4.tar.gz.sha512
+echo 'd0cede08dc33a8ac0af0f18063e57f31b615f06e911edb5ca264575174d8f4adb4338448968c403811d9dcc60f38ade3164662d6c7b69b499f56f0984bb6283c  nose-1.3.6.tar.gz' > nose-1.3.6.tar.gz.sha512
+echo '70470ebb9afef5dfd0c83ceb7a9d5f1b7a072b1a9b54b04f04f5ed50fbaedd5b4906bd500472268d478f94df9e749a88698b1ff30f2d80258e7f3fec040617d9  numpy-1.9.2.tar.gz' > numpy-1.9.2.tar.gz.sha512
+echo 'bfd10455e74e30df568c4c4827140fb6cc29893b0e062ce1764bd52852ec7487a70a0f5ea53c3fca7886f5d36365c9f4db52b8c93cad35fb67beeb44a2d56f2d  python-hglib-1.6.tar.gz' > python-hglib-1.6.tar.gz.sha512
+echo '20164f7b05c308e0f089c07fc46b1c522094f3ac136f2e0bba84f19cb63dfd36152a2465df723dd4d93c6fbd2de4f0d94c160e2bbc353a92cfd680eb03cbdc87  pyzmq-14.5.0.tar.gz' > pyzmq-14.5.0.tar.gz.sha512
+echo 'fff4412d850c431a1b4e6ee3b17958ee5ab3beb81e6cb8a8e7d56d368751eaa8781d7c3e69d932dc002d718fddc66a72098acfe74cfe29ec80b24e6736317275  scipy-0.15.1.tar.gz' > scipy-0.15.1.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
-echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
-echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
+echo 'ce0f1a17ac01eb48aec31fc0ad431d9d7ed9907f0e8584a6d79d0ffe6864fe62e203fe3f2a3c3e4e3d485809750ce07507a6488e776a388a7a9a713110882fcf  sympy-0.7.6.tar.gz' > sympy-0.7.6.tar.gz.sha512
+echo '93591068dc63af8d50a7925d528bc0cccdd705232c529b6162619fe28dddaf115e8a460b1842877d35160bd7ed480c1bd0bdbec57d1f359085bd1814e0c1c242  tornado-4.0.2.tar.gz' > tornado-4.0.2.tar.gz.sha512
+echo '0d928ed688ed940d460fa8f8d574a9819dccc4e030d735a8c7db71b59287ee50fa741a08249e356c78356b03c2174f2f2699f05aa7dc3d380ed47d8d7bab5408  zeromq-4.0.5.tar.gz' > zeromq-4.0.5.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
+echo '38a89aad89dc9aa682dbfbca623e2f69511f5e20d4a3526c01aabbc7e93ae78f20aac566676b431e111540b41540a1c4f644ce4174e7ecf052318612075e02dc  setuptools-16.0.tar.gz' > setuptools-16.0.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
@@ -658,10 +684,11 @@
 [ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
-get_ytproject $PYTHON.tgz
+[ $INST_HG -eq 1 ] && get_ytproject $MERCURIAL.tar.gz
+[ $INST_PY3 -eq 1 ] && get_ytproject $PYTHON3.tgz
+get_ytproject $PYTHON2.tgz
 get_ytproject $NUMPY.tar.gz
 get_ytproject $MATPLOTLIB.tar.gz
-get_ytproject $MERCURIAL.tar.gz
 get_ytproject $IPYTHON.tar.gz
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
@@ -669,6 +696,7 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
+get_ytproject $SETUPTOOLS.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -785,11 +813,11 @@
     fi
 fi
 
-if [ ! -e $PYTHON/done ]
+if [ ! -e $PYTHON2/done ]
 then
-    echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
-    cd $PYTHON
+    echo "Installing Python 2. This may take a while, but don't worry. yt loves you."
+    [ ! -e $PYTHON2 ] && tar xfz $PYTHON2.tgz
+    cd $PYTHON2
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -800,7 +828,30 @@
     cd ..
 fi
 
-export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages/
+if [ $INST_PY3 -eq 1 ]
+then
+    if [ ! -e $PYTHON3/done ]
+    then
+        echo "Installing Python 3. Because two Pythons are better than one."
+        [ ! -e $PYTHON3 ] && tar xfz $PYTHON3.tgz
+        cd $PYTHON3
+        ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+        ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/pyyt 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3.4 ${DEST_DIR}/bin/python 2>&1 ) 1>> ${LOG_FILE}
+        ( ln -sf ${DEST_DIR}/bin/python3-config ${DEST_DIR}/bin/python-config 2>&1 ) 1>> ${LOG_FILE}
+        ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
+        touch done
+        cd ..
+    fi
+fi
+
+export PYTHONPATH=${DEST_DIR}/lib/${PYTHON_EXEC}/site-packages/
+
+# Install setuptools
+do_setup_py $SETUPTOOLS
 
 if [ $INST_HG -eq 1 ]
 then
@@ -845,12 +896,10 @@
 
 # This fixes problems with gfortran linking.
 unset LDFLAGS
-
-echo "Installing distribute"
-( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
-
+ 
 echo "Installing pip"
-( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${GETFILE} https://bootstrap.pypa.io/get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( ${DEST_DIR}/bin/${PYTHON_EXEC} get-pip.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
 if [ $INST_SCIPY -eq 0 ]
 then
@@ -946,8 +995,8 @@
 fi
 
 do_setup_py $IPYTHON
+do_setup_py $CYTHON
 do_setup_py $H5PY
-do_setup_py $CYTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -984,13 +1033,14 @@
 
 echo "Installing yt"
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/${PYTHON_EXEC} setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
 touch done
 cd $MY_PWD
 
-if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import readline" 2>&1 )>> ${LOG_FILE}) || \
+	[[ "${MYOS##Darwin}" != "${MYOS}" && $INST_PY3 -eq 1 ]] 
 then
-    if !( ( ${DEST_DIR}/bin/python2.7 -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
+    if !( ( ${DEST_DIR}/bin/${PYTHON_EXEC} -c "import gnureadline" 2>&1 )>> ${LOG_FILE})
     then
         echo "Installing pure-python readline"
         ( ${DEST_DIR}/bin/pip install gnureadline 2>&1 ) 1>> ${LOG_FILE}

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/about/index.rst
--- a/doc/source/about/index.rst
+++ b/doc/source/about/index.rst
@@ -31,7 +31,7 @@
 `our members website. <http://yt-project.org/members.html>`_
 
 For an up-to-date list of everyone who has contributed to the yt codebase, 
-see the current `CREDITS <http://hg.yt-project.org/yt/src/yt/CREDITS>`_ file.  
+see the current `CREDITS <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ file.  
 For a more detailed breakdown of contributions made by individual users, see our 
 `Open HUB page <https://www.openhub.net/p/yt_amr/contributors?query=&sort=commits>`_.
 

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/analysis_modules/halo_finders.rst
--- a/doc/source/analyzing/analysis_modules/halo_finders.rst
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -65,9 +65,9 @@
 groups in six phase-space dimensions and one time dimension, which 
 allows for robust (grid-independent, shape-independent, and noise-
 resilient) tracking of substructure. The code is prepackaged with yt, 
-but also `separately available <http://code.google.com/p/rockstar>`_. The lead 
+but also `separately available <https://bitbucket.org/gfcstanford/rockstar>`_. The lead 
 developer is Peter Behroozi, and the methods are described in `Behroozi
-et al. 2011 <http://rockstar.googlecode.com/files/rockstar_ap101911.pdf>`_. 
+et al. 2011 <http://arxiv.org/abs/1110.4372>`_. 
 In order to run the Rockstar halo finder in yt, make sure you've 
 :ref:`installed it so that it can integrate with yt <rockstar-installation>`.
 
@@ -116,7 +116,7 @@
   the width of the smallest grid element in the simulation from the
   last data snapshot (i.e. the one where time has evolved the
   longest) in the time series:
-  ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  ``ds_last.index.get_smallest_dx() * ds_last['Mpch']``.
 * ``total_particles``, if supplied, this is a pre-calculated
   total number of dark matter
   particles present in the simulation. For example, this is useful

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/analysis_modules/star_analysis.rst
--- a/doc/source/analyzing/analysis_modules/star_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/star_analysis.rst
@@ -125,7 +125,7 @@
 tables of Bruzual & Charlot (hereafter B&C). Please see their `2003 paper
 <http://adsabs.harvard.edu/abs/2003MNRAS.344.1000B>`_ for more information
 and the `main data
-distribution page <http://www.cida.ve/~bruzual/bc2003>`_ for the original data.
+distribution page <http://www.bruzual.org/bc03/>`_ for the original data.
 Based on the mass, age and metallicity of each star, a cumulative spectrum is
 generated and can be output in two ways, either raw, or as a spectral
 energy distribution.

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -99,30 +99,35 @@
 mask out everything except the particles with which you are concerned.
 
 Creating a particle filter takes a few steps.  You must first define a 
-function which accepts a geometric object (e.g. all_data, sphere, etc.)
+function which accepts a data object (e.g. all_data, sphere, etc.)
 as its argument.  It uses the fields and information in this data
-object in order to produce some sort of conditional mask that is then returned.
-Here is the function to filter only the particles with `particle_type` (i.e. 
-field = `('all', 'particle_type')` equal to 2. (This is the case with
-Enzo star particles.)
+object in order to produce a boolean mask that is then returned
+to create a new particle type.
+
+Here is a particle filter to create a new ``stars`` particle type.  For Enzo
+simulations, stars have ``particle_type`` set to 2, so our filter will select
+only the particles with the ``particle_type`` field (i.e. ``('all',
+'particle_type')``) equal to 2.
 
 .. code-block:: python
 
-    def Stars(pfilter, data):
-        filter = data[("all", "particle_type")] == 2
+    @yt.particle_filter(requires=["particle_type"], filtered_type='all')
+    def stars(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 2
         return filter
 
-The particle_filter must now be defined to incorporate this function.  It takes
-a few arguments: a name for the filter, our filter function, and the fields
-that it requires in a dataset in order to work (in this case, it requires
-the ('all', 'particle_type') field.
+The :func:`~yt.data_objects.particle_filters.particle_filter` decorator takes a
+few options.  You must specify the names of the particle fields that are
+required in order to define the filter --- in this case the ``particle_type``
+field.  Additionally, you must specify the particle type to be filtered --- in
+this case we filter all of the particles in the dataset by specifying the ``all``
+particle type.
 
-.. code-block:: python
+In addition, you may specify a name for the newly defined particle type.  If no
+name is specified, the name for the particle type will be inferred from the name
+of the filter definition --- in this case the inferred name will be ``stars``.
 
-    from yt.data_objects.particle_filters import add_particle_filter
-    add_particle_filter("stars", function=Stars, filtered_type='all', requires=["particle_type"])
-
-And lastly, the filter must be applied to our dataset of choice.  Note that this 
+Lastly, the filter must be applied to our dataset of choice.  Note that this 
 filter can be added to as many datasets as we wish.  It will only actually
 create new filtered fields if the dataset has the required fields, though.
 
@@ -133,10 +138,27 @@
     ds.add_particle_filter('stars')
 
 And that's it!  We can now access all of the ('stars', field) fields from 
-our dataset `ds` and treat them as any other particle field.  In addition,
-it created some `deposit` fields, where the particles were deposited on to
+our dataset ``ds`` and treat them as any other particle field.  In addition,
+it created some ``deposit`` fields, where the particles were deposited on to
 the grid as mesh fields.
 
+As an alternative syntax, you can also define a new particle filter via the
+:func:`~yt.data_objects.particle_filters.add_particle_filter` function.  
+
+.. code-block:: python
+
+    from yt.data_objects.particle_filters import add_particle_filter
+
+    def Stars(pfilter, data):
+        filter = data[(pfilter.filtered_type, "particle_type")] == 2
+        return filter
+
+    add_particle_filter("stars", function=Stars, filtered_type='all',
+                        requires=["particle_type"])
+
+This is equivalent to our use of the ``particle_filter`` decorator above.  The
+choice between the ``particle_filter`` decorator and the
+``add_particle_filter`` function is purely stylistic.
+
 .. notebook:: particle_filter.ipynb
 
 .. _particle-unions:
@@ -172,7 +194,7 @@
 
 Creating geometric objects for a dataset provides a means for filtering
 a field based on spatial location.  The most commonly used of these are
-spheres, regions (3D prisms), ellipsoids, disks, and rays.  The `all_data`
+spheres, regions (3D prisms), ellipsoids, disks, and rays.  The ``all_data``
 object which gets used throughout this documentation section is an example of 
 a geometric object, but it defaults to including all the data in the dataset
 volume.  To see all of the geometric objects available, see 
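
Putting the new decorator syntax together, a minimal end-to-end sketch
(assuming yt 3.x and the ``IsolatedGalaxy`` sample dataset, whose Enzo star
particles have ``particle_type == 2``):

.. code-block:: python

   import yt

   @yt.particle_filter(requires=["particle_type"], filtered_type='all')
   def stars(pfilter, data):
       # select Enzo star particles
       return data[(pfilter.filtered_type, "particle_type")] == 2

   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
   ds.add_particle_filter('stars')
   ad = ds.all_data()
   print ad['stars', 'particle_mass'].in_units('Msun')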

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/generating_processed_data.rst
--- a/doc/source/analyzing/generating_processed_data.rst
+++ b/doc/source/analyzing/generating_processed_data.rst
@@ -47,10 +47,30 @@
    frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))
    my_image = frb["density"]
 
-This resultant array can be saved out to disk or visualized using a
-hand-constructed Matplotlib image, for instance using
+This image may then be used in a hand-constructed Matplotlib image, for instance using
 :func:`~matplotlib.pyplot.imshow`.
 
+The buffer arrays can be saved out to disk in either HDF5 or FITS format:
+ 
+.. code-block:: python
+
+   frb.export_hdf5("my_images.h5", fields=["density","temperature"])
+   frb.export_fits("my_images.fits", fields=["density","temperature"],
+                   clobber=True, units="kpc")
+
+In the FITS case, there is an option for setting the ``units`` of the coordinate system in
+the file. If you want to overwrite a file with the same name, set ``clobber=True``. 
+
+The :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` can even be exported
+as a 2D dataset itself, which may be operated on in the same way as any other dataset in yt:
+
+.. code-block:: python
+
+   ds_frb = frb.export_dataset(fields=["density","temperature"], nprocs=8)
+   sp = ds_frb.sphere("c", (100.,"kpc"))
+
+where the ``nprocs`` parameter can be used to decompose the image into ``nprocs`` grids.
+
 .. _generating-profiles-and-histograms:
 
 Profiles and Histograms
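
Before moving on to profiles, a rough sketch of how the export methods above
fit together (assuming the ``IsolatedGalaxy`` sample dataset; the field and
bounds choices are illustrative):

.. code-block:: python

   import yt
   from yt.visualization.fixed_resolution import FixedResolutionBuffer

   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
   sl = ds.slice(2, 0.5)  # slice along the z-axis through the domain center
   frb = FixedResolutionBuffer(sl, (0.3, 0.5, 0.6, 0.8), (512, 512))

   # write the buffer to disk, then re-load it as a standalone 2D dataset
   frb.export_hdf5("my_images.h5", fields=["density"])
   ds_frb = frb.export_dataset(fields=["density"], nprocs=4)
   sp = ds_frb.sphere("c", (100., "kpc"))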

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -500,6 +500,23 @@
 Additionally, we can store multiple objects in a single shelve file, so we 
 have to call the sphere by name.
 
+For certain data objects such as projections, serialization can be performed
+automatically if the ``serialize`` option is set to ``True`` in :ref:`the
+configuration file <configuration-file>` or set directly in the script:
+
+.. code-block:: python
+
+   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"
+
+.. note:: Use serialization with caution. Enabling serialization means that
+   once a projection of a dataset has been created (and stored in the .yt file
+   in the same directory), any subsequent changes to that dataset will be
+   ignored when attempting to create the same projection. So if you take a
+   density projection of your dataset in the 'x' direction, then somehow tweak
+   that dataset significantly, and take the density projection again, yt will
+   default to finding the original projection and 
+   :ref:`not your new one <faq-old-data>`.
+
 .. note:: It's also possible to use the standard :mod:`cPickle` module for
           loading and storing objects -- so in theory you could even save a
           list of objects!
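
For projections specifically, a short sketch of the serialization round trip
(assuming the ``IsolatedGalaxy`` sample dataset):

.. code-block:: python

   import yt
   from yt.config import ytcfg; ytcfg["yt", "serialize"] = "True"

   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
   # the first call computes the projection and caches it in the .yt file;
   # an identical later call re-reads the cached result instead of recomputing
   prj = ds.proj('density', 'x')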

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/parallel_computation.rst
--- a/doc/source/analyzing/parallel_computation.rst
+++ b/doc/source/analyzing/parallel_computation.rst
@@ -5,7 +5,7 @@
 
 yt has been instrumented with the ability to compute many -- most, even --
 quantities in parallel.  This utilizes the package 
-`mpi4py <http://code.google.com/p/mpi4py>`_ to parallelize using the Message
+`mpi4py <https://bitbucket.org/mpi4py/mpi4py>`_ to parallelize using the Message
 Passing Interface, typically installed on clusters.  
 
 .. _capabilities:
@@ -34,7 +34,7 @@
 --------------------------
 
 To run scripts in parallel, you must first install `mpi4py
-<http://code.google.com/p/mpi4py>`_ as well as an MPI library, if one is not
+<https://bitbucket.org/mpi4py/mpi4py>`_ as well as an MPI library, if one is not
 already available on your system.  Instructions for doing so are provided on the
 mpi4py website, but you may have luck by just running:
 

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/particle_filter.ipynb
--- a/doc/source/analyzing/particle_filter.ipynb
+++ b/doc/source/analyzing/particle_filter.ipynb
@@ -79,10 +79,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.data_objects.particle_filters import add_particle_filter\n",
+      "yt.add_particle_filter(\"young_stars\", function=young_stars, filtered_type='Stars', requires=[\"creation_time\"])\n",
       "\n",
-      "add_particle_filter(\"young_stars\", function=young_stars, filtered_type='Stars', requires=[\"creation_time\"])\n",
-      "add_particle_filter(\"old_stars\", function=old_stars, filtered_type='Stars', requires=[\"creation_time\"])"
+      "yt.add_particle_filter(\"old_stars\", function=old_stars, filtered_type='Stars', requires=[\"creation_time\"])"
      ],
      "language": "python",
      "metadata": {},

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/time_series_analysis.rst
--- a/doc/source/analyzing/time_series_analysis.rst
+++ b/doc/source/analyzing/time_series_analysis.rst
@@ -79,9 +79,7 @@
 Analyzing an Entire Simulation
 ------------------------------
 
-.. note:: Currently only implemented for Enzo.  Other simulation types coming 
-   soon.  Until then, rely on the above prescription for creating 
-   ``DatasetSeries`` objects.
+.. note:: Implemented for: Enzo, Gadget, OWLS.
 
 The parameter file used to run a simulation contains all the information 
 necessary to know what datasets should be available.  The ``simulation`` 
@@ -93,8 +91,7 @@
 .. code-block:: python
 
   import yt
-  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo',
-                         find_outputs=False)
+  my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
 
 Then, create a ``DatasetSeries`` object with the 
 :meth:`frontends.enzo.simulation_handling.EnzoSimulation.get_time_series` 
@@ -123,10 +120,10 @@
 to select a subset of the total data:
 
 * ``time_data`` (*bool*): Whether or not to include time outputs when 
-  gathering datasets for time series.  Default: True.
+  gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``redshift_data`` (*bool*): Whether or not to include redshift outputs 
-  when gathering datasets for time series.  Default: True.
+  when gathering datasets for time series.  Default: True.  (Enzo only)
 
 * ``initial_time`` (*float*): The earliest time for outputs to be included.  
   If None, the initial time of the simulation is used.  This can be used in 
@@ -139,15 +136,12 @@
 * ``times`` (*list*): A list of times for which outputs will be found.
   Default: None.
 
-* ``time_units`` (*str*): The time units used for requesting outputs by time.
-  Default: '1' (code units).
-
 * ``initial_redshift`` (*float*): The earliest redshift for outputs to be 
   included.  If None, the initial redshift of the simulation is used.  This
   can be used in combination with either ``final_time`` or ``final_redshift``.
   Default: None.
 
-* ``final_time`` (*float*): The latest redshift for outputs to be included.  
+* ``final_redshift`` (*float*): The latest redshift for outputs to be included.  
   If None, the final redshift of the simulation is used.  This can be used 
   in combination with either ``initial_time`` or ``initial_redshift``.  
   Default: None.
@@ -157,11 +151,11 @@
 
 * ``initial_cycle`` (*float*): The earliest cycle for outputs to be 
   included.  If None, the initial cycle of the simulation is used.  This can
-  only be used with final_cycle.  Default: None.
+  only be used with final_cycle.  Default: None.  (Enzo only)
 
 * ``final_cycle`` (*float*): The latest cycle for outputs to be included.  
   If None, the final cycle of the simulation is used.  This can only be used 
-  in combination with initial_cycle.  Default: None.
+  in combination with initial_cycle.  Default: None.  (Enzo only)
 
 * ``tolerance`` (*float*):  Used in combination with ``times`` or ``redshifts`` 
   keywords, this is the tolerance within which outputs are accepted given 
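
For example, a minimal sketch restricting the series by redshift (reusing the
``enzo_tiny_cosmology`` dataset from above; direct iteration over the series
object is assumed, as with a ``DatasetSeries``):

.. code-block:: python

   import yt

   my_sim = yt.simulation('enzo_tiny_cosmology/32Mpc_32.enzo', 'Enzo')
   # keep only outputs between redshifts 1 and 0
   my_sim.get_time_series(initial_redshift=1.0, final_redshift=0.0)
   for ds in my_sim:
       print ds.current_time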

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- a/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:c7cfb2db456d127bb633b7eee7ad6fe14290aa622ac62694c7840d80137afaba"
+  "signature": "sha256:4d19ee42177c60fb4b39550b5acd7a0f7e97f59f5c2da3565ff42cdd580454b0"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -236,7 +236,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "q1 = yt.YTArray(1.0,\"C\") # Coulombs\n",
+      "q1 = yt.YTArray(1.0,\"C\") # coulombs\n",
       "q2 = yt.YTArray(1.0,\"esu\") # electrostatic units / statcoulomb\n",
       "\n",
       "print \"units =\", q1.in_mks().units, \", dims =\", q1.units.dimensions\n",
@@ -247,21 +247,14 @@
      "outputs": []
     },
     {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Under the hood, the `yt` units system has a translation layer that converts between these two systems, without any further effort required. For example:"
-     ]
-    },
-    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.utilities.physical_constants import elementary_charge\n",
+      "B1 = yt.YTArray(1.0,\"T\") # tesla\n",
+      "B2 = yt.YTArray(1.0,\"gauss\") # gauss\n",
       "\n",
-      "print elementary_charge\n",
-      "elementary_charge_C = elementary_charge.in_units(\"C\")\n",
-      "print elementary_charge_C"
+      "print \"units =\", B1.in_mks().units, \", dims =\", B1.units.dimensions\n",
+      "print \"units =\", B2.in_cgs().units, \", dims =\", B2.units.dimensions"
      ],
      "language": "python",
      "metadata": {},
@@ -271,13 +264,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The electromagnetic unit translations `yt` understands are:\n",
-      "\n",
-      "* Charge: 1 coulomb (C) $\\leftrightarrow$ 0.1c electrostatic unit (esu, Fr)\n",
-      "* Current: 1 ampere (A, C/s) $\\leftrightarrow$ 0.1c statampere (statA, esu/s, Fr) \n",
-      "* Magnetic Field: 1 tesla (T) $\\leftrightarrow 10^4$ gauss (G)\n",
-      "\n",
-      "where \"Fr\" is the franklin, an alternative name for the electrostatic unit, and c is the speed of light. "
+      "To convert between these two systems, use [Unit Equivalencies](unit_equivalencies.html)."
      ]
     },
     {

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
--- a/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
+++ b/doc/source/analyzing/units/6)_Unit_Equivalencies.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:b62d83c168828afa81bcf0603bb37d3183f2a810258f25963254ffb24a0acd82"
+  "signature": "sha256:f0bbee67b429d3fde768568adb475908cbbe04c428cafb5a45cd01d6b0de1745"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -34,6 +34,7 @@
      "collapsed": false,
      "input": [
       "import yt\n",
+      "from yt import YTQuantity\n",
       "import numpy as np\n",
       "\n",
       "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
@@ -56,7 +57,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Equivalencies can go in both directions, without any information required other than the unit you want to convert to:"
+      "Most equivalencies can go in both directions, without any information required other than the unit you want to convert to (this is not the case for the electromagnetic equivalencies, which we'll discuss later):"
      ]
     },
     {
@@ -130,6 +131,114 @@
      "level": 3,
      "metadata": {},
      "source": [
+      "Electromagnetic Equivalencies"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Special, one-way equivalencies exist for converting between electromagnetic units in the cgs and SI unit systems. These exist since in the cgs system, electromagnetic units are comprised of the base units of seconds, grams and centimeters, whereas in the SI system Ampere is a base unit. For example, the dimensions of charge are completely different in the two systems:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "Q1 = YTQuantity(1.0,\"C\")\n",
+      "Q2 = YTQuantity(1.0,\"esu\")\n",
+      "print \"Q1 dims =\", Q1.units.dimensions\n",
+      "print \"Q2 dims =\", Q2.units.dimensions\n",
+      "print \"Q1 base units =\", Q1.in_mks()\n",
+      "print \"Q2 base units =\", Q2.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To convert from a cgs unit to an SI unit, use the \"SI\" equivalency:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import qp # the elementary charge in esu\n",
+      "qp_SI = qp.to_equivalent(\"C\",\"SI\") # convert to Coulombs\n",
+      "print qp\n",
+      "print qp_SI"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To convert from an SI unit to a cgs unit, use the \"CGS\" equivalency:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "B = YTQuantity(1.0,\"T\") # magnetic field in Tesla\n",
+      "print B, B.to_equivalent(\"gauss\",\"CGS\") # convert to Gauss"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Equivalencies exist between the SI and cgs dimensions of charge, current, magnetic field, electric potential, and resistance. As a neat example, we can convert current in Amperes and resistance in Ohms to their cgs equivalents, and then use them to calculate the \"Joule heating\" of a conductor with resistance $R$ and current $I$:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "I = YTQuantity(1.0,\"A\")\n",
+      "I_cgs = I.to_equivalent(\"statA\",\"CGS\")\n",
+      "R = YTQuantity(1.0,\"ohm\")\n",
+      "R_cgs = R.to_equivalent(\"statohm\",\"CGS\")\n",
+      "P = I**2*R\n",
+      "P_cgs = I_cgs**2*R_cgs"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The dimensions of current and resistance in the two systems are completely different, but the formula gives us the power dissipated dimensions of energy per time, so the dimensions and the result should be the same, which we can check:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print P_cgs.units.dimensions == P.units.dimensions\n",
+      "print P.in_units(\"W\"), P_cgs.in_units(\"W\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
       "Determining Valid Equivalencies"
      ]
     },
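
As a quick consistency check on the two equivalencies, a round-trip sketch:

.. code-block:: python

   from yt import YTQuantity

   B = YTQuantity(1.0, "T")
   B_cgs = B.to_equivalent("gauss", "CGS")  # 1 T corresponds to 10^4 G
   B_back = B_cgs.to_equivalent("T", "SI")
   print B, B_cgs, B_back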

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/notebook_tutorial.rst
--- a/doc/source/cookbook/notebook_tutorial.rst
+++ b/doc/source/cookbook/notebook_tutorial.rst
@@ -19,7 +19,7 @@
 Depending on your default web browser and system setup this will open a web
 browser and direct you to the notebook dashboard.  If it does not, you might
 need to connect to the notebook manually.  See the `IPython documentation
-<http://ipython.org/ipython-doc/stable/interactive/notebook.html#starting-the-notebook-server>`_
+<http://ipython.org/ipython-doc/stable/notebook/notebook.html#starting-the-notebook-server>`_
 for more details.
 
 For the notebook tutorial, we rely on example notebooks that are part of the
@@ -28,8 +28,4 @@
 your own computer, simply download the notebook by clicking the 'Download
 Notebook' link in the top right corner of each page.
 
-1. `Running Code in the IPython Notebook <http://nbviewer.ipython.org/url/github.com/ipython/ipython/raw/master/examples/notebooks/Part%201%20-%20Running%20Code.ipynb>`_
-2. `Basic Output <http://nbviewer.ipython.org/url/github.com/ipython/ipython/raw/master/examples/notebooks/Part%202%20-%20Basic%20Output.ipynb>`_
-3. `Plotting with matplotlib <http://nbviewer.ipython.org/url/github.com/ipython/ipython/raw/master/examples/notebooks/Part%203%20-%20Plotting%20with%20Matplotlib.ipynb>`_
-4. `Markdown Cells <http://nbviewer.ipython.org/url/github.com/ipython/ipython/raw/master/examples/notebooks/Part%204%20-%20Markdown%20Cells.ipynb>`_
-5. `IPython's rich display system <http://nbviewer.ipython.org/url/github.com/ipython/ipython/raw/master/examples/notebooks/Part%205%20-%20Rich%20Display%20System.ipynb>`_
+1. `IPython Notebook Tutorials <http://nbviewer.ipython.org/github/ipython/ipython/blob/master/examples/Notebook/Index.ipynb>`_

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/particle_one_color_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_one_color_plot.py
@@ -0,0 +1,13 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', color='b')
+
+# zoom in a little bit
+p.set_width(500, 'kpc')
+
+#save result
+p.save()

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/particle_xvz_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_xvz_plot.py
@@ -0,0 +1,15 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_velocity_z', ['particle_mass'])
+
+# pick some appropriate units
+p.set_unit('particle_position_x', 'Mpc')
+p.set_unit('particle_velocity_z', 'km/s')
+p.set_unit('particle_mass', 'Msun')
+
+# save result
+p.save()

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/particle_xy_plot.py
--- /dev/null
+++ b/doc/source/cookbook/particle_xy_plot.py
@@ -0,0 +1,14 @@
+import yt
+
+# load the dataset
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+
+# create our plot
+p = yt.ParticlePlot(ds, 'particle_position_x', 'particle_position_y', 'particle_mass', width=(0.5, 0.5))
+
+# pick some appropriate units
+p.set_axes_unit('kpc')
+p.set_unit('particle_mass', 'Msun')
+
+#save result
+p.save()

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -15,7 +15,7 @@
 # We bump up our minimum to cut out some of the background fluid
 tf = yt.ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
-# Add three guassians, evenly spaced between the min and
+# Add three Gaussians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the
 # gist_stern colormap.
 tf.add_layers(3, w=0.02, colormap="gist_stern")

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/simple_plots.rst
--- a/doc/source/cookbook/simple_plots.rst
+++ b/doc/source/cookbook/simple_plots.rst
@@ -124,6 +124,41 @@
 
 .. yt_cookbook:: simple_off_axis_projection.py
 
+.. _cookbook-simple-particle-plot:
+
+Simple Particle Plot
+~~~~~~~~~~~~~~~~~~~~
+
+You can also use yt to make particle-only plots. This script shows how to
+plot all the particle x and y positions in a dataset, using the particle mass
+to set the color scale.
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_xy_plot.py
+
+.. _cookbook-non-spatial-particle-plot:
+
+Non-spatial Particle Plots
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You are not limited to plotting spatial fields on the x and y axes. This
+example shows how to plot the particle x-coordinates versus their z-velocities,
+again using the particle mass to set the colorbar. 
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_xvz_plot.py
+
+.. _cookbook-single-color-particle-plot:
+
+Single-color Particle Plots
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+If you don't want to display a third field on the color bar axis, simply pass
+in a color string instead of a particle field.
+See :ref:`particle-plots` for more information.
+
+.. yt_cookbook:: particle_one_color_plot.py
+
 .. _cookbook-simple_volume_rendering:
 
 Simple Volume Rendering

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/cookbook/simple_volume_rendering.py
--- a/doc/source/cookbook/simple_volume_rendering.py
+++ b/doc/source/cookbook/simple_volume_rendering.py
@@ -15,9 +15,9 @@
 # We bump up our minimum to cut out some of the background fluid
 tf = yt.ColorTransferFunction((np.log10(mi)+1, np.log10(ma)))
 
-# Add three guassians, evenly spaced between the min and
+# Add five Gaussians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the
-# gist_stern colormap.
+# spectral colormap.
 tf.add_layers(5, w=0.02, colormap="spectral")
 
 # Choose a center for the render.

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -5,9 +5,9 @@
 
 .. note:: If you already know how to use version control and are comfortable
    with handling it yourself, the quickest way to contribute to yt is to `fork
-   us on BitBucket <http://hg.yt-project.org/yt/fork>`_, `make your changes
+   us on BitBucket <http://bitbucket.org/yt_analysis/yt/fork>`_, `make your changes
    <http://mercurial.selenic.com/>`_, and issue a `pull request
-   <http://hg.yt-project.org/yt/pull-requests>`_.  The rest of this document is just an
+   <http://bitbucket.org/yt_analysis/yt/pull-requests>`_.  The rest of this document is just an
    explanation of how to do that.
 
 yt is a community project!
@@ -354,7 +354,7 @@
 --------------------------------------
 
 yt is hosted on BitBucket, and you can see all of the yt repositories at
-http://hg.yt-project.org/.  With the yt installation script you should have a
+http://bitbucket.org/yt_analysis/.  With the yt installation script you should have a
 copy of Mercurial for checking out pieces of code.  Make sure you have followed
 the steps above for bootstrapping your development (to assure you have a
 bitbucket account, etc.)

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -469,6 +469,8 @@
   first image in the primary file. If this is not the case,
   yt will raise a warning and will not load this field.
 
+.. _additional_fits_options:
+
 Additional Options
 ^^^^^^^^^^^^^^^^^^
 
@@ -570,6 +572,35 @@
 ``WCSAxes`` is still in an experimental state, but as its functionality improves it will be
 utilized more here.
 
+``create_spectral_slabs``
+"""""""""""""""""""""""""
+
+.. note::
+
+  The following functionality requires the `spectral-cube <http://spectral-cube.readthedocs.org>`_
+  library to be installed. 
+  
+If you have a spectral intensity dataset of some sort, and would like to extract emission in 
+particular slabs along the spectral axis of a certain width, ``create_spectral_slabs`` can be
+used to generate a dataset with these slabs as different fields. In this example, we use it
+to extract individual lines from an intensity cube:
+
+.. code-block:: python
+
+  slab_centers = {'13CN': (218.03117, 'GHz'),
+                  'CH3CH2CHO': (218.284256, 'GHz'),
+                  'CH3NH2': (218.40956, 'GHz')}
+  slab_width = (0.05, "GHz")
+  ds = create_spectral_slabs("intensity_cube.fits",
+                             slab_centers, slab_width,
+                             nan_mask=0.0)
+
+All keyword arguments to ``create_spectral_slabs`` are passed on to ``load`` when creating the dataset
+(see :ref:`additional_fits_options` above). In the returned dataset, the different slabs will be
+different fields, with the field names taken from the keys in ``slab_centers``. The WCS coordinates 
+on the spectral axis are reset so that the center of the domain along this axis is zero, and the 
+left and right edges of the domain along this axis are :math:`\pm` ``0.5*slab_width``.
+
 Examples of Using FITS Data
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -635,13 +666,14 @@
    import yt
    ds = yt.load("snapshot_061.hdf5")
 
-However, yt cannot detect raw-binary Gadget data, and so you must specify the
-format as being Gadget:
+Gadget data in raw binary format can also be loaded with the ``load`` command. 
+This is only supported for snapshots created with the ``SnapFormat`` parameter 
+set to 1 (the standard for Gadget-2).
 
 .. code-block:: python
 
    import yt
-   ds = yt.GadgetDataset("snapshot_061")
+   ds = yt.load("snapshot_061")
 
 .. _particle-bbox:
 

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -211,7 +211,7 @@
 If you have gone through all of the above steps, and you're still encountering 
 problems, then you have found a bug.  
 To submit a bug report, you can either directly create one through the
-BitBucket `web interface <http://hg.yt-project.org/yt/issues/new>`_,
+BitBucket `web interface <http://bitbucket.org/yt_analysis/yt/issues/new>`_,
 or you can use the command line ``yt bugreport`` to interactively create one.
 Alternatively, email the ``yt-users`` mailing list and we will construct a new
 ticket in your stead.  Remember to include the information

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -67,7 +67,7 @@
 
 .. code-block:: bash
 
-  wget http://hg.yt-project.org/yt/raw/stable/doc/install_script.sh
+  wget http://bitbucket.org/yt_analysis/yt/raw/stable/doc/install_script.sh
 
 .. _installing-yt:
 
@@ -213,10 +213,31 @@
 ++++++++++++++++++++++++++++++++++++++
 
 To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
+installed on your system. 
+
+If you use a Linux OS, use your distro's package manager to install these yt
+dependencies on your system:
+
+- ``HDF5``
+- ``zeromq``
+- ``sqlite`` 
+- ``mercurial``
+
+Then install the required Python packages with ``pip``:
+
+.. code-block:: bash
+
+  $ pip install -r requirements.txt
+
+If you're using IPython notebooks, you can install its dependencies
+with ``pip`` as well:
+
+.. code-block:: bash
+
+  $ pip install -r optional-requirements.txt
+
+From here, you can use ``pip`` (which comes with ``Python``) to install the latest
+stable version of yt:
 
 .. code-block:: bash
 

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -30,6 +30,16 @@
    ~yt.visualization.profile_plotter.PhasePlot
    ~yt.visualization.profile_plotter.PhasePlotMPL
 
+Particle Plots
+^^^^^^^^^^^^^^
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.visualization.particle_plots.ParticleProjectionPlot
+   ~yt.visualization.particle_plots.ParticlePhasePlot
+   ~yt.visualization.particle_plots.ParticlePlot
+
 Fixed Resolution Pixelization
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -37,6 +47,7 @@
    :toctree: generated/
 
    ~yt.visualization.fixed_resolution.FixedResolutionBuffer
+   ~yt.visualization.fixed_resolution.ParticleImageBuffer
    ~yt.visualization.fixed_resolution.CylindricalFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer
    ~yt.visualization.fixed_resolution.OffAxisProjectionFixedResolutionBuffer
@@ -408,6 +419,7 @@
    ~yt.data_objects.profiles.Profile1D
    ~yt.data_objects.profiles.Profile2D
    ~yt.data_objects.profiles.Profile3D
+   ~yt.data_objects.profiles.ParticleProfile
    ~yt.data_objects.profiles.create_profile
 
 .. _halo_analysis_ref:
@@ -491,6 +503,16 @@
    ~yt.fields.field_info_container.FieldInfoContainer.add_field
    ~yt.data_objects.static_output.Dataset.add_field
 
+
+Particle Filters
+----------------
+
+.. autosummary::
+   :toctree: generated/
+
+   ~yt.data_objects.particle_filters.add_particle_filter
+   ~yt.data_objects.particle_filters.particle_filter
+
 Image Handling
 --------------
 
@@ -748,7 +770,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.parallel_simple_proxy
    ~yt.data_objects.data_containers.YTDataContainer.get_field_parameter
    ~yt.data_objects.data_containers.YTDataContainer.set_field_parameter
-   ~yt.visualization.plot_modifications.sanitize_coord_system
 
 Math Utilities
 --------------

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/reference/changelog.rst
--- a/doc/source/reference/changelog.rst
+++ b/doc/source/reference/changelog.rst
@@ -8,7 +8,7 @@
 Contributors
 ------------
 
-The `CREDITS file <http://hg.yt-project.org/yt/src/yt/CREDITS>`_ contains the
+The `CREDITS file <http://bitbucket.org/yt_analysis/yt/src/yt/CREDITS>`_ contains the
 most up-to-date list of everyone who has contributed to the yt source code.
 
 Version 3.1

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/reference/configuration.rst
--- a/doc/source/reference/configuration.rst
+++ b/doc/source/reference/configuration.rst
@@ -104,8 +104,9 @@
   IPython notebook created by ``yt notebook``.  Note that this should be an
   sha512 hash, not a plaintext password.  Starting ``yt notebook`` with no
   setting will provide instructions for setting this.
-* ``serialize`` (default: ``'True'``): Are we allowed to write to the ``.yt`` file?
-* ``sketchfab_api_key`` (default: empty): API key for http://sketchfab.com/ for
+* ``serialize`` (default: ``'False'``): If true, perform automatic 
+  :ref:`object serialization <object-serialization>`
+* ``sketchfab_api_key`` (default: empty): API key for https://sketchfab.com/ for
   uploading AMRSurface objects.
 * ``suppressStreamLogging`` (default: ``'False'``): If true, execution mode will be
   quiet.

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/reference/python_introduction.rst
--- a/doc/source/reference/python_introduction.rst
+++ b/doc/source/reference/python_introduction.rst
@@ -736,8 +736,8 @@
 Python and Related References
 +++++++++++++++++++++++++++++
     * `Python quickstart <http://docs.python.org/tutorial/>`_
-    * `Learn Python the Hard Way <http://learnpythonthehardway.org/index>`_
+    * `Learn Python the Hard Way <http://learnpythonthehardway.org/book/>`_
     * `Byte of Python <http://www.swaroopch.com/notes/Python>`_
     * `Dive Into Python <http://diveintopython.org>`_
-    * `Numpy docs <http://docs.numpy.org/>`_
-    * `Matplotlib docs <http://matplotlib.sf.net>`_
+    * `Numpy docs <http://docs.scipy.org/doc/numpy/>`_
+    * `Matplotlib docs <http://matplotlib.org>`_

diff -r b2c60e683290d14b189ca173bd874df95ee074bf -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e doc/source/visualizing/mapserver.rst
--- a/doc/source/visualizing/mapserver.rst
+++ b/doc/source/visualizing/mapserver.rst
@@ -4,7 +4,7 @@
 -----------------------------------------------------
 
 The mapserver is a new, experimental feature.  It's based on `Leaflet
-<http://leaflet.cloudmade.com/>`_, a library written to create zoomable,
+<http://leafletjs.com/>`_, a library written to create zoomable,
 map-tile interfaces.  (Similar to Google Maps.)  yt provides everything you
 need to start up a web server that will interactively re-pixelize an adaptive
 image.  This means you can explore your datasets in a fully pan-n-zoom

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/661d8ad11efb/
Changeset:   661d8ad11efb
Branch:      yt
User:        chummels
Date:        2015-06-06 00:39:08+00:00
Summary:     Changing TotalMass derived quantity to just return [gas_mass, particle_mass].
Affected #:  1 file

diff -r 529ab2935c34d85f6f18b56a0a7f0d6b78c0166e -r 661d8ad11efb94411b018617db8c08c74dfbe2ae yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -178,8 +178,8 @@
 class TotalMass(TotalQuantity):
     r"""
     Calculates the total mass of the object. Returns a YTArray where the
-    first element is total gas mass, the second element is total particle mass,
-    and the third element is the total mass in both particle/grid forms.
+    first element is total gas mass and the second element is total particle 
+    mass.
 
     Examples
     --------
@@ -201,7 +201,7 @@
             part = super(TotalMass, self).__call__([('all', 'particle_mass')])
         else:
             part = self.data_source.ds.arr([0], 'g')
-        return self.data_source.ds.arr([gas, part, np.sum([gas,part])])
+        return self.data_source.ds.arr([gas, part])
 
 class CenterOfMass(DerivedQuantity):
     r"""


https://bitbucket.org/yt_analysis/yt/commits/832060c8ef25/
Changeset:   832060c8ef25
Branch:      yt
User:        ngoldbaum
Date:        2015-07-09 16:45:29+00:00
Summary:     Merged in chummels/yt (pull request #1543)

slight [API_CHANGE] Generalizing derived quantity outputs to all be YTArrays or lists of YTArrays as appropriate
Affected #:  1 file

diff -r 7ec5b0eb212762924b344dcb9a94e04f475d5606 -r 832060c8ef256adf8a22bac6f60f8de418d4541e yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -96,6 +96,10 @@
     r"""
     Calculates the weight average of a field or fields.
 
+    Returns a YTQuantity for each field requested; if one,
+    it returns a single YTQuantity, if many, it returns a list of YTQuantities
+    in order of the listed fields.  
+
     Where f is the field and w is the weight, the weighted average is
     Sum_i(f_i \* w_i) / Sum_i(w_i).
 
@@ -173,8 +177,9 @@
 
 class TotalMass(TotalQuantity):
     r"""
-    Calculates the total mass in gas and particles. Returns a tuple where the
-    first part is total gas mass and the second part is total particle mass.
+    Calculates the total mass of the object. Returns a YTArray where the
+    first element is total gas mass and the second element is total particle 
+    mass.
 
     Examples
     --------
@@ -189,11 +194,14 @@
         fi = self.data_source.ds.field_info
         fields = []
         if ("gas", "cell_mass") in fi:
-            fields.append(("gas", "cell_mass"))
+            gas = super(TotalMass, self).__call__([('gas', 'cell_mass')])
+        else:
+            gas = self.data_source.ds.arr([0], 'g')
         if ("all", "particle_mass") in fi:
-            fields.append(("all", "particle_mass"))
-        rv = super(TotalMass, self).__call__(fields)
-        return rv
+            part = super(TotalMass, self).__call__([('all', 'particle_mass')])
+        else:
+            part = self.data_source.ds.arr([0], 'g')
+        return self.data_source.ds.arr([gas, part])
 
 class CenterOfMass(DerivedQuantity):
     r"""
@@ -330,7 +338,10 @@
 class WeightedVariance(DerivedQuantity):
     r"""
     Calculates the weighted variance and weighted mean for a field
-    or list of fields.
+    or list of fields. Returns a YTArray for each field requested; if one,
+    it returns a single YTArray, if many, it returns a list of YTArrays
+    in order of the listed fields.  The first element of each YTArray is
+    the weighted variance, and the second element is the weighted mean.
 
     Where f is the field, w is the weight, and <f_w> is the weighted mean,
     the weighted variance is
@@ -384,10 +395,10 @@
             my_mean = values[i]
             my_var2 = values[i + int(len(values) / 2)]
             all_mean = (my_weight * my_mean).sum(dtype=np.float64) / all_weight
-            rvals.append(np.sqrt((my_weight * (my_var2 +
-                                               (my_mean - all_mean)**2)).sum(dtype=np.float64) /
-                                               all_weight))
-            rvals.append(all_mean)
+            rvals.append(self.data_source.ds.arr([(np.sqrt((my_weight * 
+                                                 (my_var2 + (my_mean - 
+                                                  all_mean)**2)).sum(dtype=np.float64) 
+                                                  / all_weight)), all_mean]))
         return rvals
 
 class AngularMomentumVector(DerivedQuantity):
@@ -395,6 +406,7 @@
     Calculates the angular momentum vector, using gas and/or particles.
 
     The angular momentum vector is the mass-weighted mean specific angular momentum.
+    Returns a YTArray of the vector.
 
     Parameters
     ----------
@@ -416,10 +428,6 @@
 
     """
     def count_values(self, use_gas=True, use_particles=True):
-        use_gas &= \
-          (("gas", "cell_mass") in self.data_source.ds.field_info)
-        use_particles &= \
-          (("all", "particle_mass") in self.data_source.ds.field_info)
         num_vals = 0
         if use_gas: num_vals += 4
         if use_particles: num_vals += 4
@@ -453,11 +461,15 @@
             jy += values.pop(0).sum(dtype=np.float64)
             jz += values.pop(0).sum(dtype=np.float64)
             m  += values.pop(0).sum(dtype=np.float64)
-        return (jx / m, jy / m, jz / m)
+        return self.data_source.ds.arr([jx / m, jy / m, jz / m])
 
 class Extrema(DerivedQuantity):
     r"""
     Calculates the min and max value of a field or list of fields.
+    Returns a YTArray for each field requested.  If one, a single YTArray
+    is returned; if many, a list of YTArrays in the order of the field list is 
+    returned.  The first element of each YTArray is the minimum of the
+    field and the second is the maximum of the field.
 
     Parameters
     ----------
@@ -500,7 +512,7 @@
 
     def reduce_intermediate(self, values):
         # The values get turned into arrays here.
-        return [(mis.min(), mas.max() )
+        return [self.data_source.ds.arr([mis.min(), mas.max()])
                 for mis, mas in zip(values[::2], values[1::2])]
 
 class MaxLocation(DerivedQuantity):
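
A sketch of the new return types for the quantities touched here (assuming the
``IsolatedGalaxy`` sample dataset):

.. code-block:: python

   import yt

   ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
   ad = ds.all_data()
   # each result is now a YTArray rather than a plain tuple;
   # the first element of weighted_variance is the square root of the
   # weighted variance, per the np.sqrt in reduce_intermediate above
   std, mean = ad.quantities.weighted_variance('density', 'cell_mass')
   mi, ma = ad.quantities.extrema('density')
   L = ad.quantities.angular_momentum_vector()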

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.



More information about the yt-svn mailing list