[yt-svn] commit/yt: 27 new changesets

commits-noreply at bitbucket.org
Tue Jul 22 14:19:39 PDT 2014


27 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3290181e3619/
Changeset:   3290181e3619
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-03-13 16:18:12
Summary:     Be much more careful about assigning clump IDs.
Affected #:  1 file

diff -r 6c0273b42d198d3ac270238feed0e5a5ace9899c -r 3290181e36197c342e5e7bf092bf0b3ba9a79c18 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -108,10 +108,21 @@
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        for cid in sorted(unique_contours):
             new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid + 1)],
+                    ["obj['Contours'] == %s" % cid],
                     {'contour_slices': cids})
+            if new_clump["Ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "Ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))

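The key change here is that contour IDs are no longer assumed to be the
consecutive range(nj); every ID actually present in the per-grid contour
arrays is collected instead. A minimal sketch of that collection pattern,
with a made-up cids mapping standing in for identify_contours() output
(parent_grid_id -> list of (slice, value-array) pairs, -1 meaning "no
contour"):

    import numpy as np

    # Hypothetical stand-in for cids; -1 marks cells in no contour.
    cids = {
        0: [(np.s_[0:2, 0:2], np.array([[3, -1], [3, 7]]))],
        1: [(np.s_[1:3, 1:3], np.array([[7, 12], [-1, 12]]))],
    }

    unique_contours = set([])
    for sl_list in cids.values():
        for sl, ff in sl_list:
            unique_contours.update(np.unique(ff))

    # IDs need not be consecutive, so iterating range(nj) would miss some:
    print(sorted(unique_contours))   # [-1, 3, 7, 12]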

https://bitbucket.org/yt_analysis/yt/commits/832b0a3a7e7d/
Changeset:   832b0a3a7e7d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-18 16:00:13
Summary:     Fixing a set of typos in the field names.
Affected #:  1 file

diff -r 3290181e36197c342e5e7bf092bf0b3ba9a79c18 -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -117,10 +117,10 @@
                 unique_contours.update(np.unique(ff))
         for cid in sorted(unique_contours):
             new_clump = self.data.cut_region(
-                    ["obj['Contours'] == %s" % cid],
+                    ["obj['contours'] == %s" % cid],
                     {'contour_slices': cids})
-            if new_clump["Ones"].size == 0:
-                # This is to skip possibly duplicate clumps.  Using "Ones" here
+            if new_clump["ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "ones" here
                 # will speed things up.
                 continue
             self.children.append(Clump(new_clump, self, self.field,


https://bitbucket.org/yt_analysis/yt/commits/39a6dfb94bf4/
Changeset:   39a6dfb94bf4
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-18 18:58:16
Summary:     Fix a few additional issues with clump finding.
Affected #:  5 files

diff -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -5,7 +5,9 @@
 
 import yt
 from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
-                                                get_lowest_clumps)
+                                                get_lowest_clumps,
+                                                write_clump_index,
+                                                write_clumps)
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"  # parameter file to load
 # this is the field we look for contours over -- we could do
@@ -46,12 +48,12 @@
 # master-clump.  Among different ways we can examine it, there's a convenience
 # function for outputting the full index to a file.
 f = open('%s_clump_index.txt' % ds, 'w')
-yt.amods.level_sets.write_clump_index(master_clump, 0, f)
+write_clump_index(master_clump, 0, f)
 f.close()
 
 # We can also output some handy information, as well.
 f = open('%s_clumps.txt' % ds, 'w')
-yt.amods.level_sets.write_clumps(master_clump, 0, f)
+write_clumps(master_clump, 0, f)
 f.close()
 
 # We can traverse the clump index to get a list of all of the 'leaf' clumps

diff -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -116,8 +116,9 @@
             for sl, ff in sl_list:
                 unique_contours.update(np.unique(ff))
         for cid in sorted(unique_contours):
+            if cid == -1: continue
             new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % cid],
+                    ["obj['contours'] == %s" % (cid)],
                     {'contour_slices': cids})
             if new_clump["ones"].size == 0:
                 # This is to skip possibly duplicate clumps.  Using "ones" here

diff -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -16,6 +16,7 @@
 
 import types
 import numpy as np
+from contextlib import contextmanager
 
 from yt.funcs import *
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
@@ -718,6 +719,14 @@
             self.field_data[field] = self.base_object[field][ind]
 
     @property
+    def blocks(self):
+        # We have to take a slightly different approach here.  Note that all
+        # that .blocks has to yield is a 3D array and a mask.
+        for b, m in self.base_object.blocks:
+            m[~self._cond_ind] = 0
+            yield b, m
+
+    @property
     def _cond_ind(self):
         ind = None
         obj = self.base_object

diff -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -447,8 +447,6 @@
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
                     get_spos(vc0, i + oi, -1, j + oj, 1, spos)

diff -r 832b0a3a7e7d1cf6871bd9050edec969240beac7 -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -689,20 +689,20 @@
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
-            mylog.debug("Pixelizing contour %s", i)
+            mylog.info("Pixelizing contour %s", i)
 
-            xf_copy = clump[xf].copy()
-            yf_copy = clump[yf].copy()
+            xf_copy = clump[xf].copy().in_units("code_length")
+            yf_copy = clump[yf].copy().in_units("code_length")
 
             temp = _MPL.Pixelize(xf_copy, yf_copy,
-                                 clump[dxf]/2.0,
-                                 clump[dyf]/2.0,
-                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
+                                 clump[dxf].in_units("code_length")/2.0,
+                                 clump[dyf].in_units("code_length")/2.0,
+                                 clump[dxf].d*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
-                                     extent=extent,**self.plot_args)
+                                     extent=extent, **self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):

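Most of the fixes above are mechanical (import cleanup, coercing positions
to code_length before pixelizing in the clump annotation callback); the
substantive one is the new blocks property, which yields the base object's
blocks with the mask zeroed wherever the cut condition fails. A rough,
self-contained sketch of that masking pattern, with plain arrays and a
callable condition standing in for yt's data objects (following the
corrected form this property takes in changeset dd0a0004572a, which copies
the mask before modifying it):

    import numpy as np

    def cut_region_blocks(base_blocks, condition):
        # Yield (block, mask) pairs, restricting each mask by the condition.
        # base_blocks: iterable of (3D data array, boolean mask) pairs.
        for block, mask in base_blocks:
            mask = mask.copy()            # never mutate the base object's mask
            mask &= condition(block)      # apply the cut-region condition
            if not np.any(mask):
                continue                  # skip blocks the cut excludes entirely
            yield block, mask

    data = np.arange(8.0).reshape(2, 2, 2)
    base = [(data, np.ones((2, 2, 2), dtype=bool))]
    for b, m in cut_region_blocks(base, lambda d: d > 3.0):
        print(m.sum())   # 4 cells survive the cut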

https://bitbucket.org/yt_analysis/yt/commits/b4a6085f06eb/
Changeset:   b4a6085f06eb
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-18 22:08:11
Summary:     Clump IDs can cause name collisions.  This fixes it.
Affected #:  2 files

diff -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 -r b4a6085f06ebc5f497319dc92dd55d00854863fe yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -39,9 +39,9 @@
         node_ids.append(nid)
         values = g[field][sl].astype("float64")
         contour_ids = np.zeros(dims, "int64") - 1
-        gct.identify_contours(values, contour_ids, total_contours)
+        total_contours += gct.identify_contours(values, contour_ids,
+                                                total_contours)
         new_contours = tree.cull_candidates(contour_ids)
-        total_contours += new_contours.shape[0]
         tree.add_contours(new_contours)
         # Now we can create a partitioned grid with the contours.
         LE = (DLE + g.dds * gi).in_units("code_length").ndarray_view()

diff -r 39a6dfb94bf491fc03e7b2da98bfbdf9d0e97b47 -r b4a6085f06ebc5f497319dc92dd55d00854863fe yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -349,6 +349,7 @@
         for i in range(ni*nj*nk): 
             if container[i] != NULL: free(container[i])
         free(container)
+        return nc
 
 @cython.boundscheck(False)
 @cython.wraparound(False)

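The collision mechanics: each grid's contour pass numbers its contours
starting from a running offset, but the offset was previously advanced by
the size of the tree.cull_candidates() output rather than by the number of
IDs actually handed out, so two grids could mint the same ID. Having
identify_contours() report its true count (the return nc added to
ContourFinding.pyx) fixes the bookkeeping. A toy version of the
offset-accumulation pattern, with label_grid() as a hypothetical stand-in
for gct.identify_contours():

    import numpy as np

    def label_grid(values, contour_ids, offset):
        # Label cells above a threshold with IDs offset..offset+n-1 and,
        # as in the fix, return how many new contours were assigned.
        hot = values > 0.5
        n = int(hot.sum())                # pretend each hot cell is a contour
        contour_ids[hot] = offset + np.arange(n)
        return n

    total_contours = 0
    for grid in (np.random.random((4, 4, 4)) for _ in range(3)):
        ids = np.zeros(grid.shape, dtype="int64") - 1
        total_contours += label_grid(grid, ids, total_contours)

    print("contours labeled:", total_contours)   # no two grids share an ID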

https://bitbucket.org/yt_analysis/yt/commits/dd0a0004572a/
Changeset:   dd0a0004572a
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 14:24:10
Summary:     Each contour field gets its own key.
Affected #:  4 files

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r dd0a0004572af76b5cb0cd19b8f9cc3d9148c0fa yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -15,11 +15,31 @@
 
 import numpy as np
 import copy
+import uuid
 
 from yt.funcs import *
 
 from .contour_finder import identify_contours
 
+from yt.fields.derived_field import \
+    ValidateSpatial
+
+def add_contour_field(ds, contour_key):
+    def _contours(field, data):
+        fd = data.get_field_parameter("contour_slices_%s" % contour_key)
+        vals = data["index", "ones"] * -1
+        if fd is None or fd == 0.0:
+            return vals
+        for sl, v in fd.get(data.id, []):
+            vals[sl] = v
+        return vals
+
+    ds.add_field(("index", "contours_%s" % contour_key),
+                 function=_contours,
+                 validators=[ValidateSpatial(0)],
+                 take_log=False,
+                 display_field=False)
+
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
@@ -115,11 +135,14 @@
         for sl_list in cids.values():
             for sl, ff in sl_list:
                 unique_contours.update(np.unique(ff))
+        contour_key = uuid.uuid4().hex
+        base_object = getattr(self.data, 'base_object', self.data)
+        add_contour_field(base_object.pf, contour_key)
         for cid in sorted(unique_contours):
             if cid == -1: continue
-            new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid)],
-                    {'contour_slices': cids})
+            new_clump = base_object.cut_region(
+                    ["obj['contours_%s'] == %s" % (contour_key, cid)],
+                    {('contour_slices_%s' % contour_key): cids})
             if new_clump["ones"].size == 0:
                 # This is to skip possibly duplicate clumps.  Using "ones" here
                 # will speed things up.
@@ -195,7 +218,7 @@
             elif (child._isValid()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r dd0a0004572af76b5cb0cd19b8f9cc3d9148c0fa yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -51,6 +51,8 @@
             LE, RE, dims.astype("int64"))
         contours[nid] = (g.Level, node.node_ind, pg, sl)
     node_ids = np.array(node_ids)
+    if node_ids.size == 0:
+        return 0, {}
     trunk = data_source.tiles.tree.trunk
     mylog.info("Linking node (%s) contours.", len(contours))
     link_node_contours(trunk, contours, tree, node_ids)

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r dd0a0004572af76b5cb0cd19b8f9cc3d9148c0fa yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -722,9 +722,14 @@
     def blocks(self):
         # We have to take a slightly different approach here.  Note that all
         # that .blocks has to yield is a 3D array and a mask.
-        for b, m in self.base_object.blocks:
-            m[~self._cond_ind] = 0
-            yield b, m
+        for obj, m in self.base_object.blocks:
+            m = m.copy()
+            with obj._field_parameter_state(self.field_parameters):
+                for cond in self.conditionals:
+                    ss = eval(cond)
+                    m = np.logical_and(m, ss, m)
+            if not np.any(m): continue
+            yield obj, m
 
     @property
     def _cond_ind(self):

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r dd0a0004572af76b5cb0cd19b8f9cc3d9148c0fa yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -207,18 +207,3 @@
              units="cm",
              display_field=False)
 
-    def _contours(field, data):
-        fd = data.get_field_parameter("contour_slices")
-        vals = data["index", "ones"] * -1
-        if fd is None or fd == 0.0:
-            return vals
-        for sl, v in fd.get(data.id, []):
-            vals[sl] = v
-        return vals
-    
-    registry.add_field(("index", "contours"),
-                       function=_contours,
-                       validators=[ValidateSpatial(0)],
-                       take_log=False,
-                       display_field=False)
-

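The collision the title refers to: with a single ("index", "contours")
field and a fixed "contour_slices" parameter name, nested or repeated
find_children() calls all read and wrote the same state. Minting a
uuid4().hex key per search gives each one a private field/parameter pair,
and cut regions now chain back to their base_object so the field is
evaluated in the right place. A schematic of the keying pattern, with a
plain dict as a hypothetical stand-in for yt's field registry:

    import uuid

    registry = {}   # hypothetical stand-in for the dataset's field registry

    def add_contour_field(contour_key):
        # One field name and one field-parameter name per search, both
        # keyed by a fresh uuid, so searches cannot clobber each other.
        field_name = "contours_%s" % contour_key
        param_name = "contour_slices_%s" % contour_key
        registry[field_name] = param_name
        return field_name, param_name

    f1, p1 = add_contour_field(uuid.uuid4().hex)
    f2, p2 = add_contour_field(uuid.uuid4().hex)
    print(f1 != f2 and p1 != p2)   # True: two searches, two namespaces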

https://bitbucket.org/yt_analysis/yt/commits/3f2098b833af/
Changeset:   3f2098b833af
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 15:21:56
Summary:     Just in case, do not allow cut_cut_regions.
Affected #:  1 file

diff -r dd0a0004572af76b5cb0cd19b8f9cc3d9148c0fa -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -731,6 +731,9 @@
             if not np.any(m): continue
             yield obj, m
 
+    def cut_region(self, *args, **kwargs):
+        raise NotImplementedError
+
     @property
     def _cond_ind(self):
         ind = None

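The guard is deliberately blunt: the blocks machinery added above evaluates
a cut region's conditionals against its base object, and nothing composes
conditionals across nesting levels, so a cut region of a cut region would
be easy to get silently wrong. Raising immediately turns that into a loud
failure; schematically (a hypothetical stand-in class, not yt's):

    class CutRegion(object):
        def __init__(self, base_object, conditionals):
            self.base_object = base_object
            self.conditionals = list(conditionals)

        def cut_region(self, *args, **kwargs):
            # Nesting would require the conditionals to compose; until
            # they do, refuse loudly rather than return wrong data.
            raise NotImplementedError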

https://bitbucket.org/yt_analysis/yt/commits/18bbebc1adb1/
Changeset:   18bbebc1adb1
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-18 23:42:14
Summary:     Updating clump finder writeout and recipe.
Affected #:  3 files

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -23,7 +23,7 @@
 # thing!  This is a convenience parameter that prepares an object that covers
 # the whole domain.  Note, though, that it will load on demand and not before!
 data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                      (8., 'kpc'), (1., 'kpc'))
+                      (1., 'kpc'), (1., 'kpc'))
 
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
@@ -68,10 +68,3 @@
 
 # Lastly, we write the plot to disk.
 prj.save('clumps')
-
-# We can also save the clump object to disk to read in later so we don't have
-# to spend a lot of time regenerating the clump objects.
-ds.h.save_object(master_clump, 'My_clumps')
-
-# Later, we can read in the clump object like so,
-master_clump = ds.load_object('My_clumps')

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -22,9 +22,6 @@
     get_lowest_clumps, \
     write_clump_index, \
     write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
     _DistanceToMainClump
 
 from .clump_tools import \

diff -r b4a6085f06ebc5f497319dc92dd55d00854863fe -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -65,21 +65,21 @@
         self.clump_info = []
 
         # Number of cells.
-        self.add_info_item('self.data["CellMassMsun"].size','"Cells: %d" % value')
+        self.add_info_item('self.data["gas", "cell_mass"].size','"Cells: %d" % value')
         # Gas mass in solar masses.
-        self.add_info_item('self.data["CellMassMsun"].sum()','"Mass: %e Msolar" % value')
+        self.add_info_item('self.data["gas", "cell_mass"].sum()','"Mass: %e Msolar" % value')
         # Volume-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")',
-                           '"Jeans Mass (vol-weighted): %.6e Msolar" % value')
+        self.add_info_item('self.data.quantities.weighted_average_quantity("jeans_mass", ("index", "cell_volume"))',
+                           '"Jeans Mass (volume-weighted): %.6e Msolar" % value')
         # Mass-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")',
+        self.add_info_item('self.data.quantities.weighted_average_quantity("jeans_mass", ("gas", "cell_mass"))',
                            '"Jeans Mass (mass-weighted): %.6e Msolar" % value')
         # Max level.
-        self.add_info_item('self.data["GridLevel"].max()','"Max grid level: %d" % value')
+        self.add_info_item('self.data["index", "grid_level"].max()','"Max grid level: %d" % value')
         # Minimum number density.
-        self.add_info_item('self.data["NumberDensity"].min()','"Min number density: %.6e cm^-3" % value')
+        self.add_info_item('self.data["number_density"].min()','"Min number density: %.6e cm^-3" % value')
         # Maximum number density.
-        self.add_info_item('self.data["NumberDensity"].max()','"Max number density: %.6e cm^-3" % value')
+        self.add_info_item('self.data["number_density"].max()','"Max number density: %.6e cm^-3" % value')
 
     def clear_clump_info(self):
         "Clears the clump_info array and passes the instruction to its children."
@@ -239,63 +239,17 @@
         for child in clump.children:
             write_clumps(child,0,f_ptr)
 
-# Old clump info writing routines.
-def write_old_clump_index(clump,level,f_ptr):
-    for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    write_old_clump_info(clump,level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
-
-def write_old_clumps(clump,level,f_ptr):
-    if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        write_old_clump_info(clump,level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-__clump_info_template = \
-"""
-%(tl)sCells: %(num_cells)s
-%(tl)sMass: %(total_mass).6e Msolar
-%(tl)sJeans Mass (vol-weighted): %(jeans_mass_vol).6e Msolar
-%(tl)sJeans Mass (mass-weighted): %(jeans_mass_mass).6e Msolar
-%(tl)sMax grid level: %(max_level)s
-%(tl)sMin number density: %(min_density).6e cm^-3
-%(tl)sMax number density: %(max_density).6e cm^-3
-
-"""
-
-def write_old_clump_info(clump,level,f_ptr):
-    fmt_dict = {'tl':  "\t" * level}
-    fmt_dict['num_cells'] = clump.data["CellMassMsun"].size,
-    fmt_dict['total_mass'] = clump.data["CellMassMsun"].sum()
-    fmt_dict['jeans_mass_vol'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")
-    fmt_dict['jeans_mass_mass'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")
-    fmt_dict['max_level'] =  clump.data["GridLevel"].max()
-    fmt_dict['min_density'] =  clump.data["NumberDensity"].min()
-    fmt_dict['max_density'] =  clump.data["NumberDensity"].max()
-    f_ptr.write(__clump_info_template % fmt_dict)
-
 # Recipes for various clump calculations.
 recipes = {}
 
 # Distance from clump center of mass to center of mass of top level object.
 def _DistanceToMainClump(master,units='pc'):
-    masterCOM = master.data.quantities['CenterOfMass']()
+    masterCOM = master.data.quantities.center_of_mass()
     pass_command = "self.masterCOM = [%.10f, %.10f, %.10f]" % (masterCOM[0],
                                                                masterCOM[1],
                                                                masterCOM[2])
     master.pass_down(pass_command)
-    master.pass_down("self.com = self.data.quantities['CenterOfMass']()")
+    master.pass_down("self.com = self.data.quantities.center_of_mass()")
 
     quantity = "((self.com[0]-self.masterCOM[0])**2 + (self.com[1]-self.masterCOM[1])**2 + (self.com[2]-self.masterCOM[2])**2)**(0.5)*self.data.pf.units['%s']" % units
     format = "%s%s%s" % ("'Distance from center: %.6e ",units,"' % value")

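Beyond dropping the write_old_* routines, this changeset migrates the info
items from yt-2.x spellings to yt-3.0 ones. The part worth internalizing is
the derived-quantity calling convention: dict-style lookup with CamelCase
keys becomes a plain snake_case method call. A toy illustration of the two
spellings reaching the same operation (Quantities here is a hypothetical
stand-in, not yt's class):

    class Quantities(object):
        def weighted_average_quantity(self, field, weight):
            return "average of %s weighted by %s" % (field, weight)

        def __getitem__(self, key):
            # Legacy yt-2.x spelling routed to the same method.
            if key == "WeightedAverageQuantity":
                return self.weighted_average_quantity
            raise KeyError(key)

    q = Quantities()
    print(q["WeightedAverageQuantity"]("jeans_mass", ("gas", "cell_mass")))
    print(q.weighted_average_quantity("jeans_mass", ("gas", "cell_mass")))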

https://bitbucket.org/yt_analysis/yt/commits/410a9ab3a26f/
Changeset:   410a9ab3a26f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-19 22:42:34
Summary:     Moving operator registry to utilities.
Affected #:  7 files

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -27,14 +27,15 @@
      ensure_list, is_root
 from yt.utilities.exceptions import YTUnitConversionError
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
      PhasePlot
-     
-from .operator_registry import \
-    callback_registry
 
+callback_registry = OperatorRegistry()
+    
 def add_callback(name, function):
     callback_registry[name] =  HaloCallback(function)
 

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -27,10 +27,13 @@
      
 from .halo_object import \
      Halo
-from .operator_registry import \
-     callback_registry, \
-     filter_registry, \
-     finding_method_registry, \
+from .halo_callbacks import \
+     callback_registry
+from .halo_filters import \
+     filter_registry
+from .halo_finding_methods import \
+     finding_method_registry
+from .halo_quantities import \
      quantity_registry
 
 class HaloCatalog(ParallelAnalysisInterface):

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -15,10 +15,13 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.spatial import KDTree
 
 from .halo_callbacks import HaloCallback
-from .operator_registry import filter_registry
+
+filter_registry = OperatorRegistry()
 
 def add_filter(name, function):
     filter_registry[name] = HaloFilter(function)

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,10 +21,10 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 
-from .operator_registry import \
-    finding_method_registry
-
+finding_method_registry = OperatorRegistry()
 
 def add_finding_method(name, function):
     finding_method_registry[name] = HaloFindingMethod(function)

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/halo_quantities.py
--- a/yt/analysis_modules/halo_analysis/halo_quantities.py
+++ b/yt/analysis_modules/halo_analysis/halo_quantities.py
@@ -15,8 +15,12 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
 from .halo_callbacks import HaloCallback
-from .operator_registry import quantity_registry
+
+quantity_registry = OperatorRegistry()
 
 def add_quantity(name, function):
     quantity_registry[name] = HaloQuantity(function)

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Operation registry class
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import types
-
-class OperatorRegistry(dict):
-    def find(self, op, *args, **kwargs):
-        if isinstance(op, types.StringTypes):
-            # Lookup, assuming string or hashable object
-            op = copy.deepcopy(self[op])
-            op.args = args
-            op.kwargs = kwargs
-        return op
-
-callback_registry = OperatorRegistry()
-filter_registry = OperatorRegistry()
-finding_method_registry = OperatorRegistry()
-quantity_registry = OperatorRegistry()

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b yt/utilities/operator_registry.py
--- /dev/null
+++ b/yt/utilities/operator_registry.py
@@ -0,0 +1,26 @@
+"""
+Operation registry class
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import copy
+import types
+
+class OperatorRegistry(dict):
+    def find(self, op, *args, **kwargs):
+        if isinstance(op, types.StringTypes):
+            # Lookup, assuming string or hashable object
+            op = copy.deepcopy(self[op])
+            op.args = args
+            op.kwargs = kwargs
+        return op

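OperatorRegistry itself is unchanged by the move: it is a dict whose find()
returns a configured deep copy when handed a name. A usage sketch of that
pattern (Callback is a hypothetical stand-in for HaloCallback and the other
registered types; the original is Python 2, hence types.StringTypes where
this sketch uses str):

    import copy

    class OperatorRegistry(dict):
        def find(self, op, *args, **kwargs):
            if isinstance(op, str):           # types.StringTypes in the original
                op = copy.deepcopy(self[op])  # configure a copy, not the entry
                op.args = args
                op.kwargs = kwargs
            return op

    class Callback(object):                   # hypothetical stand-in
        def __init__(self, function):
            self.function = function
            self.args, self.kwargs = (), {}

        def __call__(self, target):
            return self.function(target, *self.args, **self.kwargs)

    registry = OperatorRegistry()
    registry["scale"] = Callback(lambda target, factor=1: target * factor)

    op = registry.find("scale", factor=3)     # a configured copy comes back
    print(op(7))                              # 21; the entry keeps its defaults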

https://bitbucket.org/yt_analysis/yt/commits/99ec890e2525/
Changeset:   99ec890e2525
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-19 23:56:27
Summary:     Converting clump info items to a registry system and changing write-out functions to accept filenames.
Affected #:  3 files

diff -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b -r 99ec890e2525e5f93e1c041b417518e065750b4e yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -21,9 +21,11 @@
     find_clumps, \
     get_lowest_clumps, \
     write_clump_index, \
-    write_clumps, \
-    _DistanceToMainClump
+    write_clumps
 
+from .clump_info_items import \
+    add_clump_info
+    
 from .clump_tools import \
     recursive_all_clumps, \
     return_all_clumps, \

diff -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b -r 99ec890e2525e5f93e1c041b417518e065750b4e yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,12 +13,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import copy
 import numpy as np
-import copy
 
-from yt.funcs import *
+from .clump_info_items import \
+     clump_info_registry
 
-from .contour_finder import identify_contours
+from .contour_finder import \
+     identify_contours
 
 class Clump(object):
     children = None
@@ -50,13 +52,14 @@
         # Return value of validity function, saved so it does not have to be calculated again.
         self.function_value = None
 
-    def add_info_item(self,quantity,format):
+    def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
-        self.clump_info.append({'quantity':quantity, 'format':format})
+        callback = clump_info_registry.find(info_item, *args, **kwargs)
+        self.clump_info.append(callback)
         if self.children is None: return
         for child in self.children:
-            child.add_info_item(quantity,format)
+            child.add_info_item(info_item)
 
     def set_default_clump_info(self):
         "Defines default entries in the clump_info array."
@@ -64,22 +67,13 @@
         # add_info_item is recursive so this function does not need to be.
         self.clump_info = []
 
-        # Number of cells.
-        self.add_info_item('self.data["gas", "cell_mass"].size','"Cells: %d" % value')
-        # Gas mass in solar masses.
-        self.add_info_item('self.data["gas", "cell_mass"].sum()','"Mass: %e Msolar" % value')
-        # Volume-weighted Jeans mass.
-        self.add_info_item('self.data.quantities.weighted_average_quantity("jeans_mass", ("index", "cell_volume"))',
-                           '"Jeans Mass (volume-weighted): %.6e Msolar" % value')
-        # Mass-weighted Jeans mass.
-        self.add_info_item('self.data.quantities.weighted_average_quantity("jeans_mass", ("gas", "cell_mass"))',
-                           '"Jeans Mass (mass-weighted): %.6e Msolar" % value')
-        # Max level.
-        self.add_info_item('self.data["index", "grid_level"].max()','"Max grid level: %d" % value')
-        # Minimum number density.
-        self.add_info_item('self.data["number_density"].min()','"Min number density: %.6e cm^-3" % value')
-        # Maximum number density.
-        self.add_info_item('self.data["number_density"].max()','"Max number density: %.6e cm^-3" % value')
+        self.add_info_item("total_cells")
+        self.add_info_item("cell_mass")
+        self.add_info_item("mass_weighted_jeans_mass")
+        self.add_info_item("volume_weighted_jeans_mass")
+        self.add_info_item("max_grid_level")
+        self.add_info_item("min_number_density")
+        self.add_info_item("max_number_density")
 
     def clear_clump_info(self):
         "Clears the clump_info array and passes the instruction to its children."
@@ -89,18 +83,12 @@
         for child in self.children:
             child.clear_clump_info()
 
-    def write_info(self,level,f_ptr):
+    def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
         for item in self.clump_info:
-            # Call if callable, otherwise do an eval.
-            if callable(item['quantity']):
-                value = item['quantity']()
-            else:
-                value = eval(item['quantity'])
-            output = eval(item['format'])
-            f_ptr.write("%s%s" % ('\t'*level,output))
-            f_ptr.write("\n")
+            value = item(self)
+            f_ptr.write("%s%s\n" % ('\t'*level, value))
 
     def find_children(self, min_val, max_val = None):
         if self.children is not None:
@@ -218,42 +206,35 @@
 
     return clump_list
 
-def write_clump_index(clump,level,f_ptr):
+def write_clump_index(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
+        fh.write("\t")
+    fh.write("Clump at level %d:\n" % level)
+    clump.write_info(level, fh)
+    fh.write("\n")
+    fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
+            write_clump_index(child, (level+1), fh)
+    if top:
+        fh.close()
 
-def write_clumps(clump,level,f_ptr):
+def write_clumps(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        clump.write_info(level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
+        fh.write("%sClump:\n" % ("\t"*level))
+        clump.write_info(level, fh)
+        fh.write("\n")
+        fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-# Recipes for various clump calculations.
-recipes = {}
-
-# Distance from clump center of mass to center of mass of top level object.
-def _DistanceToMainClump(master,units='pc'):
-    masterCOM = master.data.quantities.center_of_mass()
-    pass_command = "self.masterCOM = [%.10f, %.10f, %.10f]" % (masterCOM[0],
-                                                               masterCOM[1],
-                                                               masterCOM[2])
-    master.pass_down(pass_command)
-    master.pass_down("self.com = self.data.quantities.center_of_mass()")
-
-    quantity = "((self.com[0]-self.masterCOM[0])**2 + (self.com[1]-self.masterCOM[1])**2 + (self.com[2]-self.masterCOM[2])**2)**(0.5)*self.data.pf.units['%s']" % units
-    format = "%s%s%s" % ("'Distance from center: %.6e ",units,"' % value")
-
-    master.add_info_item(quantity,format)
-
-recipes['DistanceToMainClump'] = _DistanceToMainClump
+            write_clumps(child, 0, fh)
+    if top:
+        fh.close()

diff -r 410a9ab3a26f09c1edd3a6afaca2e74fe008301b -r 99ec890e2525e5f93e1c041b417518e065750b4e yt/analysis_modules/level_sets/clump_info_items.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -0,0 +1,85 @@
+"""
+ClumpInfoCallback and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_info_registry = OperatorRegistry()
+
+def add_clump_info(name, function):
+    clump_info_registry[name] = ClumpInfoCallback(function)
+
+class ClumpInfoCallback(object):
+    r"""
+    A ClumpInfoCallback is a function that takes a clump, computes a 
+    quantity, and returns a string to be printed out for writing clump info.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _total_cells(clump):
+    n_cells = clump.data["index", "ones"].size
+    return "Cells: %d." % n_cells
+add_clump_info("total_cells", _total_cells)
+
+def _cell_mass(clump):
+    cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
+    return "Mass: %e Msun." % cell_mass
+add_clump_info("cell_mass", _cell_mass)
+
+def _mass_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+def _volume_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("index", "cell_volume")).in_units("Msun")
+    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
+
+def _max_grid_level(clump):
+    max_level = clump.data["index", "grid_level"].max()
+    return "Max grid level: %d." % max_level
+add_clump_info("max_grid_level", _max_grid_level)
+
+def _min_number_density(clump):
+    min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
+    return "Min number density: %.6e cm^-3." % min_n
+add_clump_info("min_number_density", _min_number_density)
+
+def _max_number_density(clump):
+    max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
+    return "Max number density: %.6e cm^-3." % max_n
+add_clump_info("max_number_density", _max_number_density)
+
+def _distance_to_main_clump(clump):
+    master = clump.parent
+    while master.parent is not None:
+        master = master.parent
+    master_com = master.data.quantities.center_of_mass()
+    my_com = clump.data.quantities.center_of_mass()
+    distance = np.sqrt(((master_com - my_com)**2).sum())
+    return "Distance from master center of mass: %.6e pc." % \
+      distance.in_units("pc")
+add_clump_info("distance_to_main_clump", _distance_to_main_clump)

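With info items as registered callables, adding a custom one no longer
means writing eval-able strings. A sketch against the add_clump_info API
defined above; the temperature item is illustrative and assumes a dataset
that defines ("gas", "temperature"):

    from yt.analysis_modules.level_sets.clump_info_items import \
         add_clump_info

    def _max_temperature(clump):
        # Illustrative: assumes the dataset defines ("gas", "temperature").
        max_t = clump.data["gas", "temperature"].max()
        return "Max temperature: %.6e K." % max_t
    add_clump_info("max_temperature", _max_temperature)

    # Afterwards any clump can request it (recursively) with:
    #   master_clump.add_info_item("max_temperature")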

https://bitbucket.org/yt_analysis/yt/commits/3d1625ef808f/
Changeset:   3d1625ef808f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-20 00:05:32
Summary:     Updating recipe.
Affected #:  1 file

diff -r 99ec890e2525e5f93e1c041b417518e065750b4e -r 3d1625ef808f38c1f2b14406d524fbaaa62c08eb doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,35 +1,24 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import numpy as np
 
 import yt
-from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
-                                                get_lowest_clumps,
-                                                write_clump_index,
-                                                write_clumps)
+from yt.analysis_modules.level_sets.api import *
 
-fn = "IsolatedGalaxy/galaxy0030/galaxy0030"  # parameter file to load
-# this is the field we look for contours over -- we could do
-# this over anything.  Other common choices are 'AveragedDensity'
-# and 'Dark_Matter_Density'.
-field = "density"
+fn = "IsolatedGalaxy/galaxy0030/galaxy0030" # parameter file to load
+field = "Density" # this is the field we look for contours over -- we could do
+                  # this over anything.  Other common choices are 'AveragedDensity'
+                  # and 'Dark_Matter_Density'.
+step = 2.0 # This is the multiplicative interval between contours.
 
-step = 2.0  # This is the multiplicative interval between contours.
+ds = yt.load(fn) # load data
 
-ds = yt.load(fn)  # load data
-
-# We want to find clumps over the entire dataset, so we'll just grab the whole
-# thing!  This is a convenience parameter that prepares an object that covers
-# the whole domain.  Note, though, that it will load on demand and not before!
-data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                      (1., 'kpc'), (1., 'kpc'))
+data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
+                      (8, 'kpc'), (1, 'kpc'))
 
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**np.floor(np.log10(data_source[field]).min())
-c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field
@@ -46,22 +35,18 @@
 
 # As it goes, it appends the information about all the sub-clumps to the
 # master-clump.  Among different ways we can examine it, there's a convenience
-# function for outputting the full index to a file.
-f = open('%s_clump_index.txt' % ds, 'w')
-write_clump_index(master_clump, 0, f)
-f.close()
+# function for outputting the full hierarchy to a file.
+write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
 # We can also output some handy information, as well.
-f = open('%s_clumps.txt' % ds, 'w')
-write_clumps(master_clump, 0, f)
-f.close()
+write_clumps(master_clump,0, "%s_clumps.txt" % ds)
 
-# We can traverse the clump index to get a list of all of the 'leaf' clumps
+# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
 
 # If you'd like to visualize these clumps, a list of clumps can be supplied to
 # the "clumps" callback on a plot.  First, we create a projection plot:
-prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20, 'kpc'))
+prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20,'kpc'))
 
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)


https://bitbucket.org/yt_analysis/yt/commits/4f6be1015093/
Changeset:   4f6be1015093
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-20 00:20:13
Summary:     Fixing distance to center of mass info item.
Affected #:  1 file

diff -r 3d1625ef808f38c1f2b14406d524fbaaa62c08eb -r 4f6be1015093d1dc31e6407f82439fd2c58dcff8 yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -13,6 +13,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
 from yt.utilities.operator_registry import \
      OperatorRegistry
 
@@ -73,13 +75,13 @@
     return "Max number density: %.6e cm^-3." % max_n
 add_clump_info("max_number_density", _max_number_density)
 
-def _distance_to_main_clump(clump):
-    master = clump.parent
+def _distance_to_main_clump(clump, units="pc"):
+    master = clump
     while master.parent is not None:
         master = master.parent
-    master_com = master.data.quantities.center_of_mass()
-    my_com = clump.data.quantities.center_of_mass()
+    master_com = clump.data.pf.arr(master.data.quantities.center_of_mass())
+    my_com = clump.data.pf.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
-    return "Distance from master center of mass: %.6e pc." % \
-      distance.in_units("pc")
+    return "Distance from master center of mass: %.6e %s." % \
+      (distance.in_units(units), units)
 add_clump_info("distance_to_main_clump", _distance_to_main_clump)

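The substantive fix is wrapping both centers in the dataset's unit-aware
array type (pf.arr) before subtracting, so the difference carries units and
in_units() can convert it for output. The same arithmetic in miniature,
using yt-3.0's YTArray directly with made-up centers:

    import numpy as np
    from yt.units.yt_array import YTArray

    master_com = YTArray([0.0, 0.0, 0.0], "kpc")   # made-up centers
    my_com = YTArray([1.0, 2.0, 2.0], "kpc")

    distance = np.sqrt(((master_com - my_com)**2).sum())
    print("Distance from master center of mass: %.6e pc." %
          distance.in_units("pc"))   # |(1,2,2)| = 3 kpc -> 3.0e+03 pc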

https://bitbucket.org/yt_analysis/yt/commits/3ebd9fa7cd5f/
Changeset:   3ebd9fa7cd5f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-20 02:58:37
Summary:     Merging.
Affected #:  15 files

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,11 +1,7 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import numpy as np
 
 import yt
-from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
-                                                get_lowest_clumps)
+from yt.analysis_modules.level_sets.api import *
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"  # dataset to load
 # this is the field we look for contours over -- we could do
@@ -13,21 +9,18 @@
 # and 'Dark_Matter_Density'.
 field = "density"
 
-step = 2.0  # This is the multiplicative interval between contours.
+step = 2.0 # This is the multiplicative interval between contours.
 
-ds = yt.load(fn)  # load data
+ds = yt.load(fn) # load data
 
-# We want to find clumps over the entire dataset, so we'll just grab the whole
-# thing!  This is a convenience parameter that prepares an object that covers
-# the whole domain.  Note, though, that it will load on demand and not before!
-data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                      (8., 'kpc'), (1., 'kpc'))
+data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
+                      (8, 'kpc'), (1, 'kpc'))
 
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**np.floor(np.log10(data_source[field]).min())
-c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field
@@ -44,32 +37,21 @@
 
 # As it goes, it appends the information about all the sub-clumps to the
 # master-clump.  Among different ways we can examine it, there's a convenience
-# function for outputting the full index to a file.
-f = open('%s_clump_index.txt' % ds, 'w')
-yt.amods.level_sets.write_clump_index(master_clump, 0, f)
-f.close()
+# function for outputting the full hierarchy to a file.
+write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
 # We can also output some handy information, as well.
-f = open('%s_clumps.txt' % ds, 'w')
-yt.amods.level_sets.write_clumps(master_clump, 0, f)
-f.close()
+write_clumps(master_clump,0, "%s_clumps.txt" % ds)
 
-# We can traverse the clump index to get a list of all of the 'leaf' clumps
+# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
 
 # If you'd like to visualize these clumps, a list of clumps can be supplied to
 # the "clumps" callback on a plot.  First, we create a projection plot:
-prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20, 'kpc'))
+prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20,'kpc'))
 
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
 # Lastly, we write the plot to disk.
 prj.save('clumps')
-
-# We can also save the clump object to disk to read in later so we don't have
-# to spend a lot of time regenerating the clump objects.
-ds.save_object(master_clump, 'My_clumps')
-
-# Later, we can read in the clump object like so,
-master_clump = ds.load_object('My_clumps')

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -27,14 +27,15 @@
      ensure_list, is_root
 from yt.utilities.exceptions import YTUnitConversionError
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
      PhasePlot
-     
-from .operator_registry import \
-    callback_registry
 
+callback_registry = OperatorRegistry()
+    
 def add_callback(name, function):
     callback_registry[name] =  HaloCallback(function)
 

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -27,10 +27,13 @@
      
 from .halo_object import \
      Halo
-from .operator_registry import \
-     callback_registry, \
-     filter_registry, \
-     finding_method_registry, \
+from .halo_callbacks import \
+     callback_registry
+from .halo_filters import \
+     filter_registry
+from .halo_finding_methods import \
+     finding_method_registry
+from .halo_quantities import \
      quantity_registry
 
 class HaloCatalog(ParallelAnalysisInterface):

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -15,10 +15,13 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.spatial import KDTree
 
 from .halo_callbacks import HaloCallback
-from .operator_registry import filter_registry
+
+filter_registry = OperatorRegistry()
 
 def add_filter(name, function):
     filter_registry[name] = HaloFilter(function)

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,10 +21,10 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 
-from .operator_registry import \
-    finding_method_registry
-
+finding_method_registry = OperatorRegistry()
 
 def add_finding_method(name, function):
     finding_method_registry[name] = HaloFindingMethod(function)

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/halo_quantities.py
--- a/yt/analysis_modules/halo_analysis/halo_quantities.py
+++ b/yt/analysis_modules/halo_analysis/halo_quantities.py
@@ -15,8 +15,12 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
 from .halo_callbacks import HaloCallback
-from .operator_registry import quantity_registry
+
+quantity_registry = OperatorRegistry()
 
 def add_quantity(name, function):
     quantity_registry[name] = HaloQuantity(function)

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Operation registry class
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import types
-
-class OperatorRegistry(dict):
-    def find(self, op, *args, **kwargs):
-        if isinstance(op, types.StringTypes):
-            # Lookup, assuming string or hashable object
-            op = copy.deepcopy(self[op])
-            op.args = args
-            op.kwargs = kwargs
-        return op
-
-callback_registry = OperatorRegistry()
-filter_registry = OperatorRegistry()
-finding_method_registry = OperatorRegistry()
-quantity_registry = OperatorRegistry()

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -21,12 +21,11 @@
     find_clumps, \
     get_lowest_clumps, \
     write_clump_index, \
-    write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
-    _DistanceToMainClump
+    write_clumps
 
+from .clump_info_items import \
+    add_clump_info
+    
 from .clump_tools import \
     recursive_all_clumps, \
     return_all_clumps, \

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,12 +13,14 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import copy
 import numpy as np
-import copy
 
-from yt.funcs import *
+from .clump_info_items import \
+     clump_info_registry
 
-from .contour_finder import identify_contours
+from .contour_finder import \
+     identify_contours
 
 class Clump(object):
     children = None
@@ -50,13 +52,14 @@
         # Return value of validity function, saved so it does not have to be calculated again.
         self.function_value = None
 
-    def add_info_item(self,quantity,format):
+    def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
-        self.clump_info.append({'quantity':quantity, 'format':format})
+        callback = clump_info_registry.find(info_item, *args, **kwargs)
+        self.clump_info.append(callback)
         if self.children is None: return
         for child in self.children:
-            child.add_info_item(quantity,format)
+            child.add_info_item(info_item)
 
     def set_default_clump_info(self):
         "Defines default entries in the clump_info array."
@@ -64,22 +67,13 @@
         # add_info_item is recursive so this function does not need to be.
         self.clump_info = []
 
-        # Number of cells.
-        self.add_info_item('self.data["CellMassMsun"].size','"Cells: %d" % value')
-        # Gas mass in solar masses.
-        self.add_info_item('self.data["CellMassMsun"].sum()','"Mass: %e Msolar" % value')
-        # Volume-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")',
-                           '"Jeans Mass (vol-weighted): %.6e Msolar" % value')
-        # Mass-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")',
-                           '"Jeans Mass (mass-weighted): %.6e Msolar" % value')
-        # Max level.
-        self.add_info_item('self.data["GridLevel"].max()','"Max grid level: %d" % value')
-        # Minimum number density.
-        self.add_info_item('self.data["NumberDensity"].min()','"Min number density: %.6e cm^-3" % value')
-        # Maximum number density.
-        self.add_info_item('self.data["NumberDensity"].max()','"Max number density: %.6e cm^-3" % value')
+        self.add_info_item("total_cells")
+        self.add_info_item("cell_mass")
+        self.add_info_item("mass_weighted_jeans_mass")
+        self.add_info_item("volume_weighted_jeans_mass")
+        self.add_info_item("max_grid_level")
+        self.add_info_item("min_number_density")
+        self.add_info_item("max_number_density")
 
     def clear_clump_info(self):
         "Clears the clump_info array and passes the instruction to its children."
@@ -89,18 +83,12 @@
         for child in self.children:
             child.clear_clump_info()
 
-    def write_info(self,level,f_ptr):
+    def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
         for item in self.clump_info:
-            # Call if callable, otherwise do an eval.
-            if callable(item['quantity']):
-                value = item['quantity']()
-            else:
-                value = eval(item['quantity'])
-            output = eval(item['format'])
-            f_ptr.write("%s%s" % ('\t'*level,output))
-            f_ptr.write("\n")
+            value = item(self)
+            f_ptr.write("%s%s\n" % ('\t'*level, value))
 
     def find_children(self, min_val, max_val = None):
         if self.children is not None:
@@ -108,10 +96,22 @@
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        for cid in sorted(unique_contours):
+            if cid == -1: continue
             new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid + 1)],
+                    ["obj['contours'] == %s" % (cid)],
                     {'contour_slices': cids})
+            if new_clump["ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
                                        self.cached_fields,function=self.function,
                                        clump_info=self.clump_info))
@@ -206,88 +206,35 @@
 
     return clump_list
 
-def write_clump_index(clump,level,f_ptr):
+def write_clump_index(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
+        fh.write("\t")
+    fh.write("Clump at level %d:\n" % level)
+    clump.write_info(level, fh)
+    fh.write("\n")
+    fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
+            write_clump_index(child, (level+1), fh)
+    if top:
+        fh.close()
 
-def write_clumps(clump,level,f_ptr):
+def write_clumps(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        clump.write_info(level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
+        fh.write("%sClump:\n" % ("\t"*level))
+        clump.write_info(level, fh)
+        fh.write("\n")
+        fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-# Old clump info writing routines.
-def write_old_clump_index(clump,level,f_ptr):
-    for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    write_old_clump_info(clump,level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
-
-def write_old_clumps(clump,level,f_ptr):
-    if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        write_old_clump_info(clump,level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-__clump_info_template = \
-"""
-%(tl)sCells: %(num_cells)s
-%(tl)sMass: %(total_mass).6e Msolar
-%(tl)sJeans Mass (vol-weighted): %(jeans_mass_vol).6e Msolar
-%(tl)sJeans Mass (mass-weighted): %(jeans_mass_mass).6e Msolar
-%(tl)sMax grid level: %(max_level)s
-%(tl)sMin number density: %(min_density).6e cm^-3
-%(tl)sMax number density: %(max_density).6e cm^-3
-
-"""
-
-def write_old_clump_info(clump,level,f_ptr):
-    fmt_dict = {'tl':  "\t" * level}
-    fmt_dict['num_cells'] = clump.data["CellMassMsun"].size,
-    fmt_dict['total_mass'] = clump.data["CellMassMsun"].sum()
-    fmt_dict['jeans_mass_vol'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")
-    fmt_dict['jeans_mass_mass'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")
-    fmt_dict['max_level'] =  clump.data["GridLevel"].max()
-    fmt_dict['min_density'] =  clump.data["NumberDensity"].min()
-    fmt_dict['max_density'] =  clump.data["NumberDensity"].max()
-    f_ptr.write(__clump_info_template % fmt_dict)
-
-# Recipes for various clump calculations.
-recipes = {}
-
-# Distance from clump center of mass to center of mass of top level object.
-def _DistanceToMainClump(master,units='pc'):
-    masterCOM = master.data.quantities['CenterOfMass']()
-    pass_command = "self.masterCOM = [%.10f, %.10f, %.10f]" % (masterCOM[0],
-                                                               masterCOM[1],
-                                                               masterCOM[2])
-    master.pass_down(pass_command)
-    master.pass_down("self.com = self.data.quantities['CenterOfMass']()")
-
-    quantity = "((self.com[0]-self.masterCOM[0])**2 + (self.com[1]-self.masterCOM[1])**2 + (self.com[2]-self.masterCOM[2])**2)**(0.5)*self.data.ds.units['%s']" % units
-    format = "%s%s%s" % ("'Distance from center: %.6e ",units,"' % value")
-
-    master.add_info_item(quantity,format)
-
-recipes['DistanceToMainClump'] = _DistanceToMainClump
+            write_clumps(child, 0, fh)
+    if top:
+        fh.close()

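For illustration, a minimal sketch of the reworked clump workflow described by
the hunks above (the dataset path, field, and contour thresholds are
placeholders):

  from yt.mods import *
  from yt.analysis_modules.level_sets.api import *

  ds = load("DD0000")  # placeholder dataset
  data_source = ds.sphere([0.5, 0.5, 0.5], (0.1, "unitary"))

  # Build a clump hierarchy on the density field.
  master_clump = Clump(data_source, None, ("gas", "density"))
  find_clumps(master_clump, 2.0e-26, 2.0e-24, 2.0)

  # Info items are now looked up by name in the clump info registry.
  master_clump.add_info_item("distance_to_main_clump")

  # write_clump_index/write_clumps now accept a filename and open and
  # close the file themselves when handed a string instead of a handle.
  write_clump_index(master_clump, 0, "clump_index.txt")
  write_clumps(master_clump, 0, "clumps.txt")
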
diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/level_sets/clump_info_items.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -0,0 +1,87 @@
+"""
+ClumpInfoCallback and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_info_registry = OperatorRegistry()
+
+def add_clump_info(name, function):
+    clump_info_registry[name] = ClumpInfoCallback(function)
+
+class ClumpInfoCallback(object):
+    r"""
+    A ClumpInfoCallback is a function that takes a clump, computes a 
+    quantity, and returns a string to be printed out for writing clump info.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _total_cells(clump):
+    n_cells = clump.data["index", "ones"].size
+    return "Cells: %d." % n_cells
+add_clump_info("total_cells", _total_cells)
+
+def _cell_mass(clump):
+    cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
+    return "Mass: %e Msun." % cell_mass
+add_clump_info("cell_mass", _cell_mass)
+
+def _mass_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+def _volume_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("index", "cell_volume")).in_units("Msun")
+    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
+
+def _max_grid_level(clump):
+    max_level = clump.data["index", "grid_level"].max()
+    return "Max grid level: %d." % max_level
+add_clump_info("max_grid_level", _max_grid_level)
+
+def _min_number_density(clump):
+    min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
+    return "Min number density: %.6e cm^-3." % min_n
+add_clump_info("min_number_density", _min_number_density)
+
+def _max_number_density(clump):
+    max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
+    return "Max number density: %.6e cm^-3." % max_n
+add_clump_info("max_number_density", _max_number_density)
+
+def _distance_to_main_clump(clump, units="pc"):
+    master = clump
+    while master.parent is not None:
+        master = master.parent
+    master_com = clump.data.pf.arr(master.data.quantities.center_of_mass())
+    my_com = clump.data.pf.arr(clump.data.quantities.center_of_mass())
+    distance = np.sqrt(((master_com - my_com)**2).sum())
+    return "Distance from master center of mass: %.6e %s." % \
+      (distance.in_units(units), units)
+add_clump_info("distance_to_main_clump", _distance_to_main_clump)

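Registering a custom item with the new registry is symmetric; a sketch (the
``mean_temperature`` item below is hypothetical -- any callable taking a clump
and returning a string works):

  from yt.analysis_modules.level_sets.clump_info_items import \
       add_clump_info

  def _mean_temperature(clump):
      # hypothetical item: mean gas temperature of the clump
      t = clump.data["gas", "temperature"].mean()
      return "Mean temperature: %.6e K." % t
  add_clump_info("mean_temperature", _mean_temperature)

  # Once registered, the item can be attached by name:
  #   master_clump.add_info_item("mean_temperature")
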
diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -39,9 +39,9 @@
         node_ids.append(nid)
         values = g[field][sl].astype("float64")
         contour_ids = np.zeros(dims, "int64") - 1
-        gct.identify_contours(values, contour_ids, total_contours)
+        total_contours += gct.identify_contours(values, contour_ids,
+                                                total_contours)
         new_contours = tree.cull_candidates(contour_ids)
-        total_contours += new_contours.shape[0]
         tree.add_contours(new_contours)
         # Now we can create a partitioned grid with the contours.
         LE = (DLE + g.dds * gi).in_units("code_length").ndarray_view()

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -16,6 +16,7 @@
 
 import types
 import numpy as np
+from contextlib import contextmanager
 
 from yt.funcs import *
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
@@ -718,6 +719,14 @@
             self.field_data[field] = self.base_object[field][ind]
 
     @property
+    def blocks(self):
+        # We have to take a slightly different approach here.  Note that all
+        # that .blocks has to yield is a 3D array and a mask.
+        for b, m in self.base_object.blocks:
+            m[~self._cond_ind] = 0
+            yield b, m
+
+    @property
     def _cond_ind(self):
         ind = None
         obj = self.base_object

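The new ``blocks`` property lets block-based consumers iterate a cut region
directly; a sketch, with a placeholder dataset and cut condition:

  from yt.mods import *

  ds = load("DD0000")  # placeholder dataset
  dd = ds.all_data()
  cr = dd.cut_region(["obj['density'] > 1e-24"])
  for block, mask in cr.blocks:
      # mask is zeroed wherever the cut condition fails, so masked
      # reductions agree with the cut region itself.
      print block["density"][mask == 1].sum()
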
diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -349,6 +349,7 @@
         for i in range(ni*nj*nk): 
             if container[i] != NULL: free(container[i])
         free(container)
+        return nc
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -447,8 +448,6 @@
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
                     get_spos(vc0, i + oi, -1, j + oj, 1, spos)

diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/utilities/operator_registry.py
--- /dev/null
+++ b/yt/utilities/operator_registry.py
@@ -0,0 +1,26 @@
+"""
+Operation registry class
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import copy
+import types
+
+class OperatorRegistry(dict):
+    def find(self, op, *args, **kwargs):
+        if isinstance(op, types.StringTypes):
+            # Lookup, assuming string or hashable object
+            op = copy.deepcopy(self[op])
+            op.args = args
+            op.kwargs = kwargs
+        return op

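A sketch of the registry semantics (``MyCallback`` is a hypothetical stand-in
exposing the ``args``/``kwargs`` attributes that ``find`` assigns):

  from yt.utilities.operator_registry import OperatorRegistry

  class MyCallback(object):
      def __init__(self, function):
          self.function = function
          self.args = ()
          self.kwargs = {}
      def __call__(self, target):
          return self.function(target, *self.args, **self.kwargs)

  registry = OperatorRegistry()
  registry["echo"] = MyCallback(lambda target, suffix="": target + suffix)

  # find() deep-copies the stored operator and attaches the call
  # arguments, so the registered template is never mutated.
  op = registry.find("echo", suffix="!")
  print op("clump")  # -> clump!
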
diff -r 7688ec7a09af1bf76b46dd9a085071c85dbdec4f -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -689,20 +689,20 @@
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
-            mylog.debug("Pixelizing contour %s", i)
+            mylog.info("Pixelizing contour %s", i)
 
-            xf_copy = clump[xf].copy()
-            yf_copy = clump[yf].copy()
+            xf_copy = clump[xf].copy().in_units("code_length")
+            yf_copy = clump[yf].copy().in_units("code_length")
 
             temp = _MPL.Pixelize(xf_copy, yf_copy,
-                                 clump[dxf]/2.0,
-                                 clump[dyf]/2.0,
-                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
+                                 clump[dxf].in_units("code_length")/2.0,
+                                 clump[dyf].in_units("code_length")/2.0,
+                                 clump[dxf].d*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
-                                     extent=extent,**self.plot_args)
+                                     extent=extent, **self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):

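In user code this callback is reached through ``annotate_clumps``; a sketch,
reusing names from the clump example earlier:

  # assumes ds and master_clump from the clump sketch earlier
  leaf_clumps = get_lowest_clumps(master_clump)
  prj = ProjectionPlot(ds, "z", ("gas", "density"))
  prj.annotate_clumps(leaf_clumps)
  prj.save("clumps")
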

https://bitbucket.org/yt_analysis/yt/commits/df38c1edec39/
Changeset:   df38c1edec39
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-20 03:00:27
Summary:     pf to ds.
Affected #:  1 file

diff -r 3ebd9fa7cd5fcb2b5558f0d7af8bb2e07eb796bb -r df38c1edec39584131bd3a400fcacf6ceea09e19 yt/analysis_modules/level_sets/clump_info_items.py
--- a/yt/analysis_modules/level_sets/clump_info_items.py
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -79,8 +79,8 @@
     master = clump
     while master.parent is not None:
         master = master.parent
-    master_com = clump.data.pf.arr(master.data.quantities.center_of_mass())
-    my_com = clump.data.pf.arr(clump.data.quantities.center_of_mass())
+    master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
+    my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
     distance = np.sqrt(((master_com - my_com)**2).sum())
     return "Distance from master center of mass: %.6e %s." % \
       (distance.in_units(units), units)


https://bitbucket.org/yt_analysis/yt/commits/7c7dd952078c/
Changeset:   7c7dd952078c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 15:25:49
Summary:     Merging with Britton's work, which includes lots of doc stuff.
Affected #:  350 files

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -208,38 +208,38 @@
 After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
-\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{ds = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
 \texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field} into the
 numpy array \texttt{a}. Similarly for other data containers.\\
-\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
-\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
-\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal},with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
+ \texttt{bl = ds.boolean({\it constructor})} \textemdash\ Create a boolean data
  container. {\it constructor} is a list of pre-defined non-boolean 
  data containers with nested boolean logic using the
  ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
  {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
  by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
  
-\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields \& Quantities}
@@ -261,15 +261,15 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+\texttt{slc = SlicePlot(ds, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
 perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
 {\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
 \texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
-\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = OffAxisSlicePlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
+\texttt{prj = OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -365,8 +365,8 @@
 \subsection{FAQ}
 \settowidth{\MyLen}{\texttt{multicol}}
 
-\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
-Must enter \texttt{pf.h} before this command. \\
+\texttt{ds.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
+Must enter \texttt{ds.index} before this command. \\
 
 
 %\rule{0.3\linewidth}{0.25pt}

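The practical upshot of the ``pf`` to ``ds`` rename, side by side (the dataset
path is a placeholder):

  from yt.mods import *

  ds = load("RD0005-mine/RedshiftOutput0005")  # was: pf = load(...)
  dd = ds.all_data()                           # was: pf.h.all_data()
  print ds.field_list                          # was: pf.h.field_list
  val, loc = ds.find_max("Density")            # was: pf.h.find_max(...)
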
diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -49,7 +49,7 @@
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
- * Parameter files are a last resort.
+ * Parameter files external to yt are a last resort.
  * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
@@ -61,7 +61,7 @@
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
      parameters are now properties on a Dataset subclass: you access them
-     like pf.refine_by .
+     like ds.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality
      * TopGridDimensions => domain_dimensions

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -73,7 +73,7 @@
     Examples
     --------
     These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
     a plot collection, 'c' for a center, and 'L' for a vector. 
 
     >>> a=[1,2,3]

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -19,7 +19,7 @@
 useful variable names that correspond to specific instances that the user is
 presupposed to have created.
 
-   * `pf`: a parameter file, loaded successfully
+   * `ds`: a dataset, loaded successfully
    * `sp`: a sphere
    * `c`: a 3-component "center"
    * `L`: a 3-component vector that corresponds to either angular momentum or a

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/helper_scripts/parse_cb_list.py
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/visualizing/_cb_docstrings.inc", "w")
 

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/helper_scripts/parse_dq_list.py
--- a/doc/helper_scripts/parse_dq_list.py
+++ b/doc/helper_scripts/parse_dq_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_dq_docstrings.inc", "w")
 
@@ -29,7 +29,7 @@
                             docstring = docstring))
                             #docstring = "\n".join(tw.wrap(docstring))))
 
-dd = pf.h.all_data()
+dd = ds.all_data()
 for n,func in sorted(dd.quantities.functions.items()):
     print n, func
     write_docstring(output, n, func[1])

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/helper_scripts/parse_object_list.py
--- a/doc/helper_scripts/parse_object_list.py
+++ b/doc/helper_scripts/parse_object_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_obj_docstrings.inc", "w")
 
@@ -27,7 +27,7 @@
     f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
                             docstring = 'physical-object-api'))
 
-for n,c in sorted(pf.h.__dict__.items()):
+for n,c in sorted(ds.__dict__.items()):
     if hasattr(c, '_con_args'):
         print n
         write_docstring(output, n, c)

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,15 +17,15 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
+Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
 .. notebook-cell::
 
   from yt.mods import *
-  pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.field_list):
+  ds = load("Enzo_64/DD0043/data0043")
+  for i in sorted(ds.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ b/doc/source/analyzing/_obj_docstrings.inc
@@ -1,12 +1,12 @@
 
 
-.. class:: boolean(self, regions, fields=None, pf=None, **field_parameters):
+.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
 
 
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, pf=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
+.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
@@ -24,13 +24,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
 
 
-.. class:: disk(self, center, normal, radius, height, fields=None, pf=None, **field_parameters):
+.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
 
 
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, pf=None, **field_parameters):
+.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
@@ -48,79 +48,79 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
 
 
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, pf=None, **field_parameters):
+.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
 
 
-.. class:: grid_collection(self, center, grid_list, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
 
 
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
 
 
-.. class:: inclined_box(self, origin, box_vectors, fields=None, pf=None, **field_parameters):
+.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
 
 
-.. class:: ortho_ray(self, axis, coords, fields=None, pf=None, **field_parameters):
+.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
 
 
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
+.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
 
 
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
 
 
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
 
 
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
+.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
 
 
-.. class:: ray(self, start_point, end_point, fields=None, pf=None, **field_parameters):
+.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
 
 
-.. class:: region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
 
 
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
 
 
-.. class:: slice(self, axis, coord, fields=None, center=None, pf=None, node_name=False, **field_parameters):
+.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
@@ -132,13 +132,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
 
 
-.. class:: sphere(self, center, radius, fields=None, pf=None, **field_parameters):
+.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
 
 
-.. class:: streamline(self, positions, length=1.0, fields=None, pf=None, **field_parameters):
+.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -44,7 +44,7 @@
       "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -62,7 +62,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+      "halos_ds = load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -80,7 +80,7 @@
      "collapsed": false,
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
       "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -295,9 +295,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -407,4 +407,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -222,7 +222,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"cube.fits\")"
+      "ds = load(\"cube.fits\")"
      ],
      "language": "python",
      "metadata": {},
@@ -233,7 +233,7 @@
      "collapsed": false,
      "input": [
       "# Specifying no center gives us the center slice\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -246,9 +246,9 @@
      "input": [
       "import yt.units as u\n",
       "# Picking different velocities for the slices\n",
-      "new_center = pf.domain_center\n",
-      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -259,8 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -271,8 +271,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -290,7 +290,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -303,4 +303,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -84,8 +84,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=True, opening_angle=2.0)
@@ -97,8 +97,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=False)

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,8 +58,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(pf)
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters
@@ -69,8 +69,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  haloes = LoadHaloes(pf, 'MyHaloList')
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
 object "haloes", you can get loop over the list of haloes and do
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.ellipsoid(ell_param[0],
+  ell = ds.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -8,6 +8,7 @@
    :maxdepth: 1
 
    halo_catalogs
+   halo_transition
    halo_finding
    halo_mass_function
    halo_analysis_example

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,9 +7,11 @@
 together into a single framework. This framework is substantially
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
+For a direct translation of various halo analysis tasks from yt-2.x
+to yt-3.0, please see :ref:`halo_transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
-catalog through data_pf. These halos can be found using friends-of-friends,
+catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
 use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
 details on the relative differences between these halo finders see 
@@ -19,32 +21,32 @@
 
    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
 HOP finders. Even though rockstar creates one file per processor, 
 specifying any one file allows the full catalog to be loaded. Here we 
 only specify the file output by the processor with ID 0. Note that the 
-argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
 any new information about the identified halos. To associate the halos 
 with the dataset from which they were found, supply arguments to both 
-halos_pf and data_pf.
+halos_ds and data_ds.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -215,8 +217,8 @@
 
 .. code-block:: python
 
-   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-   hc = HaloCatalog(halos_pf=hpf,
+   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")

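A sketch of the ``data_source`` keyword mentioned at the end of the hunk above
(the sphere center and radius are placeholders):

   from yt.mods import *
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
   sp = data_ds.sphere([0.5, 0.5, 0.5], (0.1, 'unitary'))
   hc = HaloCatalog(data_ds=data_ds, finder_method='hop', data_source=sp)
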
diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_finders.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -0,0 +1,192 @@
+.. _halo_finding:
+
+Halo Finding
+============
+
+There are three methods of finding particle haloes in yt. The 
+recommended and default method is called HOP, a method described 
+in `Eisenstein and Hut (1998) 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
+friends-of-friends (e.g. `Efstathiou et al. (1985) 
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
+finder is also implemented. Finally, Rockstar (`Behroozi et al. 
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is 
+a 6D phase-space halo finder developed by Peter Behroozi that 
+excels in finding subhalos and substructure, but does not allow 
+multiple particle masses.
+
+HOP
+---
+
+The version of HOP used in yt is an upgraded version of the 
+`publicly available HOP code 
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
+for 64-bit floats and integers has been added, as well as 
+parallel analysis through spatial decomposition. HOP builds 
+groups in this fashion:
+
+  1. Estimates the local density at each particle using a 
+     smoothing kernel.
+  2. Builds chains of linked particles by 'hopping' from one 
+     particle to its densest neighbor. A particle which is 
+     its own densest neighbor is the end of the chain.
+  3. All chains that share the same densest particle are 
+     grouped together.
+  4. Groups are included, linked together, or discarded 
+     depending on the user-supplied overdensity
+     threshold parameter. The default is 160.0.
+
+Please see the `HOP method paper 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
+full details.
+
+.. warning:: The FoF halo finder in yt is not thoroughly tested! 
+    It is probably fine to use, but you are strongly encouraged 
+    to check your results against the data for errors.
+
+Rockstar Halo Finding
+---------------------
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends 
+groups in six phase-space dimensions and one time dimension, which 
+allows for robust (grid-independent, shape-independent, and noise-
+resilient) tracking of substructure. The code is prepackaged with yt, 
+but also `separately available <http://code.google.com/p/rockstar>`_. The lead 
+developer is Peter Behroozi, and the methods are described in `Behroozi
+et al. 2011 <http://rockstar.googlecode.com/files/rockstar_ap101911.pdf>`_. 
+
+.. note:: At the moment, Rockstar does not support multiple particle masses, 
+  instead using a fixed particle mass. This will not affect most dark matter 
+  simulations, but does make it less useful for finding halos from the stellar
+  mass. In simulations where the highest-resolution particles all have the 
+  same mass (i.e. zoom-in grid-based simulations), one can set up a particle
+  filter to select the lowest mass particles and perform the halo finding
+  only on those.
+
+To run Rockstar halo finding, you must launch python with MPI and 
+parallelization enabled. While Rockstar itself does not require MPI to run, 
+the MPI libraries allow yt to distribute particle information across multiple 
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
+The run script (e.g. ``run_rockstar.py`` above) configures the halo finder 
+and launches a server process that disseminates run information and 
+coordinates writer-reader processes. 
+Afterwards, it launches reader and writer tasks, filling the available MPI 
+slots, which alternately read particle information and analyze for halo 
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the 
+halo catalog through the ``finder_kwargs`` argument:
+
+  * ``dm_type``, the index of the dark matter particle. Default is 1. 
+  * ``outbase``, This is where the out*list files that Rockstar makes should be
+    placed. Default is 'rockstar_halos'.
+  * ``num_readers``, the number of reader tasks (which are idle most of the 
+    time.) Default is 1.
+  * ``num_writers``, the number of writer tasks (which are fed particles and
+    do most of the analysis). Default is MPI_TASKS-num_readers-1. 
+    If left undefined, the above options are automatically 
+    configured from the number of available MPI tasks.
+  * ``force_res``, the resolution that Rockstar uses for various calculations
+    and smoothing lengths. This is in units of Mpc/h.
+    If no value is provided, this parameter is automatically set to
+    the width of the smallest grid element in the simulation from the
+    last data snapshot (i.e. the one where time has evolved the
+    longest) in the time series:
+    ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  * ``total_particles``, if supplied, this is a pre-calculated
+    total number of dark matter
+    particles present in the simulation. For example, this is useful
+    when analyzing a series of snapshots where the number of dark
+    matter particles should not change and this will save some disk
+    access time. If left unspecified, it will
+    be calculated automatically. Default: ``None``.
+  * ``dm_only``, if set to ``True``, it will be assumed that there are
+    only dark matter particles present in the simulation.
+    This option does not modify the halos found by Rockstar, however
+    this option can save disk access time if there are no star particles
+    (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and 
+out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
+We use the halo list classes to recover the information. 
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
+Parallel HOP and FOF
+--------------------
+
+Both the HOP and FoF halo finders can run in parallel using simple 
+spatial decomposition. In order to run them in parallel it is helpful 
+to understand how it works. Below in the first plot (i) is a simplified 
+depiction of three haloes labeled 1,2 and 3:
+
+.. image:: _images/ParallelHaloFinder.png
+   :width: 500
+
+Halo 3 is twice reflected around the periodic boundary conditions.
+
+In (ii), the volume has been sub-divided into four equal subregions, 
+A, B, C, and D, shown with dotted lines. Notice that halo 2 is now in 
+two different subregions, C and D, and that halo 3 is now in three, 
+A, B and D. If the halo finder is run on these four separate subregions,
+halo 1 will be identified as a single halo, but haloes 2 and 3 will be split 
+up into multiple haloes, which is incorrect. The solution is to give 
+each subregion padding to oversample into neighboring regions.
+
+In (iii), subregion C has oversampled into the other three regions, 
+with the periodic boundary conditions taken into account, shown by 
+dot-dashed lines. The other subregions oversample in a similar way.
+
+The halo finder is then run on each padded subregion independently 
+and simultaneously. By oversampling like this, haloes 2 and 3 will 
+both be enclosed fully in at least one subregion and identified 
+completely.
+
+Haloes identified with centers of mass inside the padded part of a 
+subregion are thrown out, eliminating the problem of halo duplication. 
+The centers for the three haloes are shown with stars. Halo 1 will
+belong to subregion A, 2 to C and 3 to B.
+
+To run with parallel halo finding, you must supply a value for 
+padding in the finder_kwargs argument. The ``padding`` parameter 
+is in simulation units and defaults to 0.02. This parameter is how 
+much padding is added to each of the six sides of a subregion. 
+This value should be 2x-3x larger than the largest expected halo 
+in the simulation. It is unlikely, of course, that the largest 
+object in the simulation will be on a subregion boundary, but there 
+is no way of knowing before the halo finder is run.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import *
+  ds = load("data0001")
+  hc = HaloCatalog(data_ds=ds, finder_method='hop',
+                   finder_kwargs={'padding': 0.02})
+  # --or--
+  hc = HaloCatalog(data_ds=ds, finder_method='fof',
+                   finder_kwargs={'padding': 0.02})
+
+
+In general, a little bit of padding goes a long way, and too much 
+just slows down the analysis and doesn't improve the answer (but 
+doesn't change it).  It may be worth your time to run the parallel 
+halo finder at a few paddings to find the right amount, especially 
+if you're analyzing many similar datasets.
+
+Rockstar Installation
+=====================
+
+Rockstar is slightly patched and modified to run as a library inside of 
+yt. By default it will be built with yt using the ``install_script.sh``.
+If it wasn't installed, please make sure that the installation setting
+``INST_ROCKSTAR=1`` is defined in the ``install_script.sh`` and re-run
+the installation script.

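For illustration, a minimal ``run_rockstar.py`` along the lines described
above (the reader/writer split is a placeholder for a 24-slot MPI job, with
one slot left for the server):

  # run_rockstar.py -- launch with, e.g.:
  #   mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel
  from yt.mods import *
  from yt.analysis_modules.halo_analysis.api import HaloCatalog

  ds = load('Enzo_64/RD0006/RedshiftOutput0006')
  hc = HaloCatalog(data_ds=ds, finder_method='rockstar',
                   finder_kwargs={'num_readers': 1, 'num_writers': 22})
  hc.create()
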
diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -60,8 +60,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", num_sigma_bins=200,
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", num_sigma_bins=200,
   mass_column=5)
 
 Attached to ``hmf`` is the convenience function ``write_out``, which saves
@@ -102,8 +102,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", 
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4)
   hmf.write_out(prefix='hmf')

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_profiling.rst
--- a/doc/source/analyzing/analysis_modules/halo_profiling.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-.. _halo_profiling:
-
-Halo Profiling
-==============
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>,
-   Stephen Skory <s at skory.us>
-
-The ``HaloProfiler`` provides a means of performing analysis on multiple halos 
-in a parallel-safe way.
-
-The halo profiler performs three primary functions: radial profiles, 
-projections, and custom analysis.  See the cookbook for a recipe demonstrating 
-all of these features.
-
-Configuring the Halo Profiler
------------------------------
-
-The only argument required to create a ``HaloProfiler`` object is the path 
-to the dataset.
-
-.. code-block:: python
-
-  from yt.analysis_modules.halo_profiler.api import *
-  hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046")
-
-Most of the halo profiler's options are configured with additional keyword 
-arguments:
-
- * **output_dir** (*str*): if specified, all output will be put into this path
-   instead of in the dataset directories.  Default: None.
-
- * **halos** (*str*): "multiple" for profiling more than one halo.  In this mode
-   halos are read in from a list or identified with a
-   `halo finder <../cookbook/running_halofinder.html>`_.  In "single" mode, the
-   one and only halo center is identified automatically as the location of the
-   peak in the density field.  Default: "multiple".
-
- * **halo_list_file** (*str*): name of file containing the list of halos.
-   The halo profiler will look for this file in the data directory.
-   Default: "HopAnalysis.out".
-
- * **halo_list_format** (*str* or *dict*): the format of the halo list file.
-   "yt_hop" for the format given by yt's halo finders.  "enzo_hop" for the
-   format written by enzo_hop.  This keyword can also be given in the form of a
-   dictionary specifying the column in which various properties can be found.
-   For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
-   Default: "yt_hop".
-
- * **halo_finder_function** (*function*): If halos is set to multiple and the
-   file given by halo_list_file does not exist, the halo finding function
-   specified here will be called.  Default: HaloFinder (yt_hop).
-
- * **halo_finder_args** (*tuple*): args given with call to halo finder function.
-   Default: None.
-
- * **halo_finder_kwargs** (*dict*): kwargs given with call to halo finder
-   function. Default: None.
-
- * **recenter** (*string* or function name): The name of a function
-   that will be used to move the center of the halo for the purposes of
-   analysis. See explanation and examples, below. Default: None, which
-   is equivalent to the center of mass of the halo as output by the halo
-   finder.
-
- * **halo_radius** (*float*): if no halo radii are provided in the halo list
-   file, this parameter is used to specify the radius out to which radial
-   profiles will be made.  This keyword is also used when halos is set to
-   single.  Default: 0.1.
-
- * **radius_units** (*str*): the units of **halo_radius**. 
-   Default: "1" (code units).
-
- * **n_profile_bins** (*int*): the number of bins in the radial profiles.
-   Default: 50.
-
- * **profile_output_dir** (*str*): the subdirectory, inside the data directory,
-   in which radial profile output files will be created.  The directory will be
-   created if it does not exist.  Default: "radial_profiles".
-
- * **projection_output_dir** (*str*): the subdirectory, inside the data
-   directory, in which projection output files will be created.  The directory
-   will be created if it does not exist.  Default: "projections".
-
- * **projection_width** (*float*): the width of halo projections.
-   Default: 8.0.
-
- * **projection_width_units** (*str*): the units of projection_width.
-   Default: "mpc".
-
- * **project_at_level** (*int* or "max"): the maximum refinement level to be
-   included in projections.  Default: "max" (maximum level within the dataset).
-
- * **velocity_center** (*list*): the method in which the halo bulk velocity is
-   calculated (used for calculation of radial and tangential velocities).  Valid
-   options are:
-   - ["bulk", "halo"] (Default): the velocity provided in the halo list
-   - ["bulk", "sphere"]: the bulk velocity of the sphere centered on the halo center.
-   - ["max", field]: the velocity of the cell that is the location of the maximum of the field specified.
-
- * **filter_quantities** (*list*): quantities from the original halo list
-   file to be written out in the filtered list file.  Default: ['id','center'].
-
- * **use_critical_density** (*bool*): if True, the definition of overdensity 
-   for virial quantities is calculated with respect to the critical 
-   density.  If False, overdensity is with respect to mean matter density, 
-   which is lower by a factor of Omega_M.  Default: False.
-
-Profiles
---------
-
-Once the halo profiler object has been instantiated, fields can be added for 
-profiling with the :meth:`add_profile` method:
-
-.. code-block:: python
-
-  hp.add_profile('cell_volume', weight_field=None, accumulation=True)
-  hp.add_profile('TotalMassMsun', weight_field=None, accumulation=True)
-  hp.add_profile('density', weight_field=None, accumulation=False)
-  hp.add_profile('temperature', weight_field='cell_mass', accumulation=False)
-  hp.make_profiles(njobs=-1, prefilters=["halo['mass'] > 1e13"],
-                   filename='VirialQuantities.h5')
-
-The :meth:`make_profiles` method will begin the profiling.  Use the
-**njobs** keyword to control the number of jobs over which the
-profiling is divided.  Setting to -1 results in a single processor per
-halo.  Setting to 1 results in all available processors working on the
-same halo.  The prefilters keyword tells the profiler to skip all halos with 
-masses (as loaded from the halo finder) less than a given amount.  See below 
-for more information.  Additional keyword arguments are:
-
- * **filename** (*str*): If set, a file will be written with all of the 
-   filtered halos and the quantities returned by the filter functions.
-   Default: None.
-
- * **prefilters** (*list*): A single dataset can contain thousands or tens of 
-   thousands of halos. Significant time can be saved by not profiling halos
-   that are certain to not pass any filter functions in place.  Simple filters 
-   based on quantities provided in the initial halo list can be used to filter 
-   out unwanted halos using this parameter.  Default: None.
-
- * **njobs** (*int*): The number of jobs over which to split the profiling.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
- * **profile_format** (*str*): The file format for the radial profiles, 
-   'ascii' or 'hdf5'.  Default: 'ascii'.
-
-.. image:: _images/profiles.png
-   :width: 500
-
-Radial profiles of Overdensity (left) and Temperature (right) for five halos.
-
-Projections
------------
-
-The process of making projections is similar to that of profiles:
-
-.. code-block:: python
-
-  hp.add_projection('density', weight_field=None)
-  hp.add_projection('temperature', weight_field='density')
-  hp.add_projection('metallicity', weight_field='density')
-  hp.make_projections(axes=[0, 1, 2], save_cube=True, save_images=True, 
-                      halo_list="filtered", njobs=-1)
-
-If **save_cube** is set to True, the projection data
-will be written to a set of hdf5 files 
-in the directory given by **projection_output_dir**. 
-The keyword, **halo_list**, can be 
-used to select between the full list of halos ("all"),
-the filtered list ("filtered"), or 
-an entirely new list given in the form of a file name.
-See :ref:`filter_functions` for a 
-discussion of filtering halos.  Use the **njobs** keyword to control
-the number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.  The keyword arguments are:
-
- * **axes** (*list*): A list of the axes to project along, using the usual 
-   0,1,2 convention. Default=[0,1,2].
-
- * **halo_list** (*str*) {'filtered', 'all'}: Which set of halos to make 
-   profiles of, either ones passed by the halo filters (if enabled/added), or 
-   all halos.  Default='filtered'.
-
- * **save_images** (*bool*): Whether or not to save images of the projections. 
-   Default=False.
-
- * **save_cube** (*bool*): Whether or not to save the HDF5 files of the halo 
-   projections.  Default=True.
-
- * **njobs** (*int*): The number of jobs over which to split the projections.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
-.. image:: _images/projections.png
-   :width: 500
-
-Projections of Density (top) and Temperature,
-weighted by Density (bottom), in the x (left), 
-y (middle), and z (right) directions for a single halo with a width of 8 Mpc.
-
-Halo Filters
-------------
-
-Filters can be added to create a refined list of
-halos based on their profiles or to avoid 
-profiling halos altogether based on information
-given in the halo list file.
-
-.. _filter_functions:
-
-Filter Functions
-^^^^^^^^^^^^^^^^
-
-It is often the case that one is looking to
-identify halos with a specific set of 
-properties.  This can be accomplished through the creation
-of filter functions.  A filter 
-function can take as many args and kwargs as you like,
-as long as the first argument is a 
-profile object, or at least a dictionary which contains
-the profile arrays for each field.  
-Filter functions must return a list of two things.
-The first is a True or False indicating 
-whether the halo passed the filter. 
-The second is a dictionary containing quantities 
-calculated for that halo that will be written to a
-file if the halo passes the filter.
-A sample filter function based on virial quantities can be found in 
-``yt/analysis_modules/halo_profiler/halo_filters.py``.
-
-Halo filtering takes place during the call to :meth:`make_profiles`.
-The  :meth:`add_halo_filter` method is used to add a filter to be used
-during the profiling:
-
-.. code-block:: python
-
-  hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True, 
-                     overdensity_field='ActualOverdensity', 
-                     virial_overdensity=200, 
-                     virial_filters=[['TotalMassMsun','>=','1e14']],
-                     virial_quantities=['TotalMassMsun','RadiusMpc'],
-                     use_log=True)
-
-The addition above will calculate and return virial quantities,
-mass and radius, for an 
-overdensity of 200.  In order to pass the filter, at least one
-point in the profile must be 
-above the specified overdensity and the virial mass must be at
-least 1e14 solar masses.  The **use_log** keyword indicates that interpolation 
-should be done in log space.  If 
-the VirialFilter function has been added to the filter list,
-the halo profiler will make 
-sure that the fields necessary for calculating virial quantities are added.
-As many filters as desired can be added.  If filters have been added,
-the next call to :meth:`make_profiles` will filter by all of
-the added filter functions:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out")
-
-If the **filename** keyword is set, a file will be written with all of the 
-filtered halos and the quantities returned by the filter functions.
-
-.. note:: If the profiles have already been run, the halo profiler will read
-   in the previously created output files instead of re-running the profiles.
-   The halo profiler will check to make sure the output file contains all of
-   the requested halo fields.  If not, the profile will be made again from
-   scratch.
-
-.. _halo_profiler_pre_filters:
-
-Pre-filters
-^^^^^^^^^^^
-
-A single dataset can contain thousands or tens of thousands of halos.
-Significant time can 
-be saved by not profiling halos that are certain to not pass any filter
-functions in place.  
-Simple filters based on quantities provided in the initial halo list
-can be used to filter 
-out unwanted halos using the **prefilters** keyword:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out",
-                   prefilters=["halo['mass'] > 1e13"])
-
-Arguments provided with the **prefilters** keyword should be given
-as a list of strings.  
-Each string in the list will be evaluated with an *eval*.
-
-.. note:: If a VirialFilter function has been added with a filter based
-   on mass (as in the example above), a prefilter will be automatically
-   added to filter out halos with masses greater or less than (depending
-   on the conditional of the filter) a factor of ten of the specified
-   virial mass.
-
-Recentering the Halo For Analysis
----------------------------------
-
-It is possible to move the center of the halo to a new point using an
-arbitrary function for making profiles.
-By default, the center is provided by the halo finder,
-which outputs the center of mass of the particles. For the purposes of
-analysis, it may be important to recenter onto a gas density maximum,
-or a temperature minimum.
-
-There are a number of built-in functions to do this, listed below.
-Each of the functions uses mass-weighted fields for the calculations
-of new center points.
-To use
-them, supply the HaloProfiler with the ``recenter`` option and 
-the name of the function, as in the example below.
-
-.. code-block:: python
-
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter="Max_Dark_Matter_Density")
-
-Additional options are:
-
-  * *Min_Dark_Matter_Density* - Recenter on the point of minimum dark matter
-    density in the halo.
-
-  * *Max_Dark_Matter_Density* - Recenter on the point of maximum dark matter
-    density in the halo.
-
-  * *CoM_Dark_Matter_Density* - Recenter on the center of mass of the dark
-    matter density field. This will be very similar to what the halo finder
-    provides, but not precisely the same.
-
-  * *Min_Gas_Density* - Recenter on the point of minimum gas density in the
-    halo.
-
-  * *Max_Gas_Density* - Recenter on the point of maximum gas density in the
-    halo.
-
-  * *CoM_Gas_Density* - Recenter on the center of mass of the gas density field
-    in the halo.
-
-  * *Min_Total_Density* - Recenter on the point of minimum total (gas + dark
-    matter) density in the halo.
-
-  * *Max_Total_Density* - Recenter on the point of maximum total density in the
-    halo.
-
-  * *CoM_Total_Density* - Recenter on the center of mass for the total density
-    in the halo.
-
-  * *Min_Temperature* - Recenter on the point of minimum temperature in the
-    halo.
-
-  * *Max_Temperature* - Recenter on the point of maximum temperature in the
-    halo.
-
-It is also possible to supply a user-defined function to the HaloProfiler.
-This can be used if the pre-defined functions above are not sufficient.
-The function takes a single argument, a data container for the halo,
-which is a sphere. The function returns a 3-list with the new center.
-
-In the example below, a function is used such that the halos will be
-re-centered on the point of absolute minimum temperature, which is not
-mass weighted.
-
-.. code-block:: python
-
-   from yt.mods import *
-   
-   def find_min_temp(sphere):
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", recenter=find_min_temp)
-
-It is possible to make more complicated functions. The example below extends
-the example above to include a distance control that prevents the center from
-being moved too far. If the recenter moves too far, ``[-1, -1, -1]`` is
-returned which will prevent the halo from being profiled.
-Any triplet of values less than the ``domain_left_edge`` will suffice.
-There will be a note made in the output (stderr) showing which halos were
-skipped.
-
-.. code-block:: python
-
-   from yt.mods import *
-   from yt.utilities.math_utils import periodic_dist
-   
-   def find_min_temp_dist(sphere):
-       old = sphere.center
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       d = sphere.pf['kpc'] * periodic_dist(old, [mx, my, mz],
-           sphere.pf.domain_right_edge - sphere.pf.domain_left_edge)
-       # If new center farther than 5 kpc away, don't recenter
-       if d > 5.: return [-1, -1, -1]
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter=find_min_temp_dist)
-
-Custom Halo Analysis
---------------------
-
-Besides radial profiles and projections, the halo profiler has the
-ability to run custom analysis functions on each halo.  Custom halo
-analysis functions take two arguments: a halo dictionary containing
-the id, center, etc; and a sphere object.  The example function shown
-below creates a 2D profile of the total mass in bins of density and
-temperature for a given halo.
-
-.. code-block:: python
-
-   import os
-   from yt.mods import *
-   from yt.data_objects.profiles import BinnedProfile2D
-
-   def halo_2D_profile(halo, sphere):
-       "Make a 2D profile for a halo."
-       my_profile = BinnedProfile2D(sphere,
-             128, 'density', 1e-30, 1e-24, True,
-             128, 'temperature', 1e2, 1e7, True,
-             end_collect=False)
-       my_profile.add_fields('cell_mass', weight=None, fractional=False)
-       my_filename = os.path.join(sphere.pf.fullpath, '2D_profiles', 
-             'Halo_%04d.h5' % halo['id'])
-       my_profile.write_out_h5(my_filename)
-
-Using the  :meth:`analyze_halo_spheres` function, the halo profiler
-will create a sphere centered on each halo, and perform the analysis
-from the custom routine.
-
-.. code-block:: python
-
-    hp.analyze_halo_spheres(halo_2D_profile, halo_list='filtered',
-                           analysis_output_dir='2D_profiles', 
-                           njobs=-1, dynamic=False)
-
-Just like with the :meth:`make_projections` function, the keyword,
-**halo_list**, can be used to select between the full list of halos
-("all"), the filtered list ("filtered"), or an entirely new list given
-in the form of a file name.  If the **analysis_output_dir** keyword is
-set, the halo profiler will make sure the desired directory exists in
-a parallel-safe manner.  Use the **njobs** keyword to control the
-number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/halo_transition.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -0,0 +1,106 @@
+
+Getting up to Speed with Halo Analysis in yt-3.0
+================================================
+
+If you're used to halo analysis in yt-2.x, here's a guide to
+updating your analysis pipeline to take advantage of
+the new halo catalog infrastructure. 
+
+Finding Halos
+-------------
+
+Previously, halos were found using calls to ``HaloFinder``, 
+``FOFHaloFinder`` and ``RockstarHaloFinder``. Now you are 
+encouraged to find halos when creating the halo catalog, 
+by supplying a value to the ``finder_method`` keyword when calling
+``HaloCatalog``. Currently, only halos found using rockstar or a 
+previous instance of a halo catalog can be loaded 
+using the ``halos_ds`` keyword.
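+
+For example, loading a previously generated rockstar output (as also 
+shown in the halo catalog documentation):
+
+.. code-block:: python
+
+   halos_ds = load('rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)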
+
+To pass additional arguments to the halo finders 
+themselves, supply a dictionary to ``finder_kwargs`` where
+each key in the dictionary is a keyword of the halo finder
+and the corresponding value is the value to be passed for
+that keyword.
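+
+For example, a sketch passing HOP's ``threshold`` keyword through
+``finder_kwargs`` (the value shown is illustrative):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   # Keys of finder_kwargs are keywords of the chosen halo finder.
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop',
+                    finder_kwargs={'threshold': 160})
+   hc.create()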
+
+Getting Halo Information
+------------------------
+
+All quantities that used to be present in a ``halo_list`` can 
+still be found, but they are not necessarily included by default.
+Every halo will by default have the following properties:
+
+* particle_position_i (where i can be x,y,z)
+* particle_mass
+* virial_radius
+* particle_identifier
+
+If other quantities are desired, they can be included by adding
+the corresponding quantity before the catalog is created. See
+the full halo catalog documentation for further information about
+how to add these quantities and what quantities are available.
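+
+For example, a sketch (assuming ``center_of_mass`` is among the
+available quantities):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   # Request an extra quantity before creating the catalog.
+   hc.add_quantity("center_of_mass")
+   hc.create()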
+
+You no longer have to iterate over halos in the ``halo_list``.
+Now a halo dataset can be treated as a regular dataset and 
+all quantities are available by accessing ``all_data``.
+Specifically, all quantities can be accessed as shown:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+   ad = hc.all_data()
+   masses = ad['particle_mass'][:]
+
+
+Prefiltering Halos
+------------------
+
+Prefiltering halos before analysis takes place is now done
+by adding a filter before the call to ``create``. An example
+is shown below:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
+   hc.create()
+
+Profiling Halos
+---------------
+
+The halo profiler available in yt-2.x has been removed, and
+profiling functionality is now completely contained within the
+halo catalog. A complete example of how to profile halos by 
+radius using the new infrastructure is given in 
+:ref:`halo_analysis_example`. 
+
+Plotting Halos
+--------------
+
+Annotating halo locations onto a slice or projection works in 
+the same way as in yt-2.x, but now a halo catalog must be
+passed to the annotate halo call rather than a halo list.
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+
+   prj = ProjectionPlot(data_ds, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
+
+Written Data
+------------
+
+Data is now written out in the form of HDF5 (.h5) files rather 
+than text files. The directory they are written to is 
+controlled by the keyword ``output_dir``. Each quantity
+is a field in the file.
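+
+A minimal sketch of inspecting such a file directly with ``h5py`` 
+(the file name here is illustrative):
+
+.. code-block:: python
+
+   import h5py
+   # Each quantity is stored as its own field (dataset) in the file.
+   f = h5py.File('halo_catalog/halo_catalog.0.h5', 'r')
+   print f.keys()
+   print f['particle_mass'][:]
+   f.close()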

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ b/doc/source/analyzing/analysis_modules/hmf_howto.rst
@@ -27,8 +27,8 @@
 .. code-block:: python
 
   from yt.mods import *
-  pf = load("data0001")
-  halo_list = HaloFinder(pf)
+  ds = load("data0001")
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
 
 The only important columns of data in the text file ``HopAnalysis.out``
@@ -79,8 +79,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0001")
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  ds = load("data0001")
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')
@@ -107,9 +107,9 @@
   from yt.analysis_modules.halo_mass_function.api import *
   
   # If desired, start loop here.
-  pf = load("data0001")
+  ds = load("data0001")
   
-  halo_list = HaloFinder(pf)
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
   
   hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
@@ -120,7 +120,7 @@
                 virial_quantities=['TotalMassMsun','RadiusMpc'])
   hp.make_profiles(filename="VirialHaloes.out")
   
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -60,7 +60,7 @@
    when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
-   pf.parameters.  Default: None.
+   ds.parameters.  Default: None.
 
  * **output_dir** (*string*): The directory in which images and data files
     will be written.  Default: 'LC'.

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/merger_tree.rst
--- a/doc/source/analyzing/analysis_modules/merger_tree.rst
+++ b/doc/source/analyzing/analysis_modules/merger_tree.rst
@@ -2,8 +2,9 @@
 
 Halo Merger Tree
 ================
-.. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
-.. versionadded:: 1.7
+
+.. note:: At the moment the merger tree is not yet implemented using the 
+    new halo catalog functionality. 
 
 The Halo Merger Tree extension is capable of building a database of halo mergers
 over a set of time-ordered Enzo datasets. The fractional contribution of older

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -48,7 +48,7 @@
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk",
+    ds = load("MHDSloshing/virgo_low_res.0054.vtk",
               parameters={"time_unit":(1.0,"Myr"),
                           "length_unit":(1.0,"Mpc"),
                           "mass_unit":(1.0e14,"Msun")}) 
@@ -423,7 +423,7 @@
 evacuated two "bubbles" of radius 30 kpc at a distance of 50 kpc from
 the center. 
 
-Now, we create a parameter file out of this dataset:
+Now, we create a yt Dataset object out of this dataset:
 
 .. code:: python
 
@@ -445,7 +445,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(pf.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ b/doc/source/analyzing/analysis_modules/radial_column_density.rst
@@ -41,15 +41,15 @@
 
   from yt.mods import *
   from yt.analysis_modules.radial_column_density.api import *
-  pf = load("data0030")
+  ds = load("data0030")
   
-  rcdnumdens = RadialColumnDensity(pf, 'NumberDensity', [0.5, 0.5, 0.5],
+  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
     max_radius = 0.5)
   def _RCDNumberDensity(field, data, rcd = rcdnumdens):
       return rcd._build_derived_field(data)
   add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
   
-  dd = pf.h.all_data()
+  dd = ds.all_data()
   print dd['RCDNumberDensity']
 
 The field ``RCDNumberDensity`` can be used just like any other derived field

diff -r 3f2098b833afb50cd45bf2b54d9f08f9c95ca374 -r 7c7dd952078cff9d8f031a43490b337797338e50 doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -41,8 +41,8 @@
 
 .. code-block:: python
 
-    pf = load("galaxy0030/galaxy0030")
-    writer = RadMC3DWriter(pf)
+    ds = load("galaxy0030/galaxy0030")
+    writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
     writer.write_dust_file("DustDensity", "dust_density.inp")
@@ -87,8 +87,8 @@
         return (x_co/mu_h)*data["density"]
     add_field("NumberDensityCO", function=_NumberDensityCO)
     
-    pf = load("galaxy0030/galaxy0030")
-    writer = RadMC3DWriter(pf)
+    ds = load("galaxy0030/galaxy0030")
+    writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
     writer.write_line_file("NumberDensityCO", "numberdens_co.inp")

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/e1b78cda9ec7/
Changeset:   e1b78cda9ec7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-18 23:53:31
Summary:     One more field name fix, and debug output.
Affected #:  1 file

diff -r 18bbebc1adb17b03502edd37cdc18c29dd9fd9a7 -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -104,7 +104,7 @@
 
     def find_children(self, min_val, max_val = None):
         if self.children is not None:
-            print "Wiping out existing children clumps."
+            print "Wiping out existing children clumps.", len(self.children)
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
@@ -195,7 +195,7 @@
             elif (child._isValid()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children


https://bitbucket.org/yt_analysis/yt/commits/b3eb46662e76/
Changeset:   b3eb46662e76
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 15:55:21
Summary:     Merging
Affected #:  350 files

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -208,38 +208,38 @@
 After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
-\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{ds = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
 \texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field} into the
 numpy array \texttt{a}. Similarly for other data containers.\\
-\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
-\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
-\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal}, with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
+ \texttt{bl = ds.boolean({\it constructor})} \textemdash\ Create a boolean data
  container. {\it constructor} is a list of pre-defined non-boolean 
  data containers with nested boolean logic using the
  ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
  {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
  by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
  
-\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
 \subsection{Defining New Fields \& Quantities}
@@ -261,15 +261,15 @@
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+\texttt{slc = SlicePlot(ds, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
 perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
 {\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
 \texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
-\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = OffAxisSlicePlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
+\texttt{prj = OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -365,8 +365,8 @@
 \subsection{FAQ}
 \settowidth{\MyLen}{\texttt{multicol}}
 
-\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
-Must enter \texttt{pf.h} before this command. \\
+\texttt{ds.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
+Must enter \texttt{ds.index} before this command. \\
 
 
 %\rule{0.3\linewidth}{0.25pt}

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -49,7 +49,7 @@
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
- * Parameter files are a last resort.
+ * Parameter files external to yt are a last resort.
  * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
@@ -61,7 +61,7 @@
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
      parameters are now properties on a Dataset subclass: you access them
-     like pf.refine_by .
+     like ds.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality
      * TopGridDimensions => domain_dimensions

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -73,7 +73,7 @@
     Examples
     --------
     These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
     a plot collection, 'c' for a center, and 'L' for a vector. 
 
     >>> a=[1,2,3]

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -19,7 +19,7 @@
 useful variable names that correspond to specific instances that the user is
 presupposed to have created.
 
-   * `pf`: a parameter file, loaded successfully
+   * `ds`: a dataset, loaded successfully
    * `sp`: a sphere
    * `c`: a 3-component "center"
    * `L`: a 3-component vector that corresponds to either angular momentum or a

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/helper_scripts/parse_cb_list.py
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/visualizing/_cb_docstrings.inc", "w")
 

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/helper_scripts/parse_dq_list.py
--- a/doc/helper_scripts/parse_dq_list.py
+++ b/doc/helper_scripts/parse_dq_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_dq_docstrings.inc", "w")
 
@@ -29,7 +29,7 @@
                             docstring = docstring))
                             #docstring = "\n".join(tw.wrap(docstring))))
 
-dd = pf.h.all_data()
+dd = ds.all_data()
 for n,func in sorted(dd.quantities.functions.items()):
     print n, func
     write_docstring(output, n, func[1])

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/helper_scripts/parse_object_list.py
--- a/doc/helper_scripts/parse_object_list.py
+++ b/doc/helper_scripts/parse_object_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_obj_docstrings.inc", "w")
 
@@ -27,7 +27,7 @@
     f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
                             docstring = 'physical-object-api'))
 
-for n,c in sorted(pf.h.__dict__.items()):
+for n,c in sorted(ds.__dict__.items()):
     if hasattr(c, '_con_args'):
         print n
         write_docstring(output, n, c)

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,15 +17,15 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
+Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
 .. notebook-cell::
 
   from yt.mods import *
-  pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.field_list):
+  ds = load("Enzo_64/DD0043/data0043")
+  for i in sorted(ds.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ b/doc/source/analyzing/_obj_docstrings.inc
@@ -1,12 +1,12 @@
 
 
-.. class:: boolean(self, regions, fields=None, pf=None, **field_parameters):
+.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
 
 
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, pf=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
+.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
@@ -24,13 +24,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
 
 
-.. class:: disk(self, center, normal, radius, height, fields=None, pf=None, **field_parameters):
+.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
 
 
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, pf=None, **field_parameters):
+.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
@@ -48,79 +48,79 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
 
 
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, pf=None, **field_parameters):
+.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
 
 
-.. class:: grid_collection(self, center, grid_list, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
 
 
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
 
 
-.. class:: inclined_box(self, origin, box_vectors, fields=None, pf=None, **field_parameters):
+.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
 
 
-.. class:: ortho_ray(self, axis, coords, fields=None, pf=None, **field_parameters):
+.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
 
 
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
+.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
 
 
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
 
 
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
 
 
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
+.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
 
 
-.. class:: ray(self, start_point, end_point, fields=None, pf=None, **field_parameters):
+.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
 
 
-.. class:: region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
 
 
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
 
 
-.. class:: slice(self, axis, coord, fields=None, center=None, pf=None, node_name=False, **field_parameters):
+.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
@@ -132,13 +132,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
 
 
-.. class:: sphere(self, center, radius, fields=None, pf=None, **field_parameters):
+.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
 
 
-.. class:: streamline(self, positions, length=1.0, fields=None, pf=None, **field_parameters):
+.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -44,7 +44,7 @@
       "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -62,7 +62,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+      "halos_ds = load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -80,7 +80,7 @@
      "collapsed": false,
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
       "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -295,9 +295,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -407,4 +407,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -222,7 +222,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"cube.fits\")"
+      "ds = load(\"cube.fits\")"
      ],
      "language": "python",
      "metadata": {},
@@ -233,7 +233,7 @@
      "collapsed": false,
      "input": [
       "# Specifying no center gives us the center slice\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"])\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -246,9 +246,9 @@
      "input": [
       "import yt.units as u\n",
       "# Picking different velocities for the slices\n",
-      "new_center = pf.domain_center\n",
-      "new_center[2] = pf.spec2pixel(-1.0*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -259,8 +259,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(0.7*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -271,8 +271,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "new_center[2] = pf.spec2pixel(-0.3*u.km/u.s)\n",
-      "slc = SlicePlot(pf, \"z\", [\"density\"], center=new_center)\n",
+      "new_center[2] = ds.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -290,7 +290,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "prj = ProjectionPlot(pf, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj = ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
       "prj.set_log(\"density\", True)\n",
       "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
       "prj.show()"
@@ -303,4 +303,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -84,8 +84,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=True, opening_angle=2.0)
@@ -97,8 +97,8 @@
   
   from yt.mods import *
   
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
+  ds = load("DD0000")
+  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
   
   ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
       treecode=False)

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,8 +58,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(pf)
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters
@@ -69,8 +69,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  haloes = LoadHaloes(pf, 'MyHaloList')
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
 object "haloes", you can get loop over the list of haloes and do
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.ellipsoid(ell_param[0],
+  ell = ds.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -8,6 +8,7 @@
    :maxdepth: 1
 
    halo_catalogs
+   halo_transition
    halo_finding
    halo_mass_function
    halo_analysis_example

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,9 +7,11 @@
 together into a single framework. This framework is substantially
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.0 please see :ref:`halo_transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
-catalog through data_pf. These halos can be found using friends-of-friends,
+catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
 use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
 details on the relative differences between these halo finders see 
@@ -19,32 +21,32 @@
 
    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
 HOP finders. Even though rockstar creates one file per processor, 
 specifying any one file allows the full catalog to be loaded. Here we 
 only specify the file output by the processor with ID 0. Note that the 
-argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
 any new information about the identified halos. To associate the halos 
 with the dataset from which they were found, supply arguments to both 
-halos_pf and data_pf.
+halos_ds and data_ds.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -215,8 +217,8 @@
 
 .. code-block:: python
 
-   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-   hc = HaloCatalog(halos_pf=hpf,
+   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_finders.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_finders.rst
@@ -0,0 +1,192 @@
+.. _halo_finding:
+
+Halo Finding
+============
+
+There are three methods of finding particle haloes in yt. The 
+recommended and default method is called HOP, a method described 
+in `Eisenstein and Hut (1998) 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_. A basic 
+friends-of-friends (e.g. `Efstathiou et al. (1985) 
+<http://adsabs.harvard.edu/abs/1985ApJS...57..241E>`_) halo 
+finder is also implemented. Finally, Rockstar (`Behroozi et al.
+(2011) <http://adsabs.harvard.edu/abs/2011arXiv1110.4372B>`_) is
+a 6D phase-space halo finder developed by Peter Behroozi that
+excels in finding subhalos and substructure, but does not allow
+multiple particle masses.
+
+HOP
+---
+
+The version of HOP used in yt is an upgraded version of the 
+`publicly available HOP code 
+<http://cmb.as.arizona.edu/~eisenste/hop/hop.html>`_. Support 
+for 64-bit floats and integers has been added, as well as 
+parallel analysis through spatial decomposition. HOP builds 
+groups in this fashion:
+
+  1. Estimates the local density at each particle using a
+     smoothing kernel.
+  2. Builds chains of linked particles by 'hopping' from one
+     particle to its densest neighbor; a particle which is
+     its own densest neighbor is the end of the chain (a toy
+     sketch of this step appears below).
+  3. All chains that share the same densest particle are
+     grouped together.
+  4. Groups are included, linked together, or discarded
+     depending on the user-supplied overdensity
+     threshold parameter. The default is 160.0.
+
+Please see the `HOP method paper 
+<http://adsabs.harvard.edu/abs/1998ApJ...498..137E>`_ for 
+full details.
+
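+For illustration only, here is a minimal sketch (not part of yt) of the
+chain-building 'hopping' step, assuming a precomputed ``density`` array,
+a ``neighbors`` list for each particle, and distinct density values:
+
+.. code-block:: python
+
+  import numpy as np
+
+  def hop_chains(density, neighbors):
+      """Assign each particle to the end of its densest-neighbor chain."""
+      n = len(density)
+      densest = np.empty(n, dtype=int)
+      for i in range(n):
+          # A particle that is its own densest neighbor ends a chain.
+          candidates = list(neighbors[i]) + [i]
+          densest[i] = max(candidates, key=lambda j: density[j])
+      chain_end = np.arange(n)
+      for i in range(n):
+          # Hop until reaching a fixed point (the chain end).
+          j = i
+          while densest[j] != j:
+              j = densest[j]
+          chain_end[i] = j
+      return chain_end
+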
+.. warning:: The FoF halo finder in yt is not thoroughly tested! 
+    It is probably fine to use, but you are strongly encouraged 
+    to check your results against the data for errors.
+
+Rockstar Halo Finding
+---------------------
+
+Rockstar uses an adaptive hierarchical refinement of friends-of-friends
+groups in six phase-space dimensions and one time dimension, which
+allows for robust (grid-independent, shape-independent, and
+noise-resilient) tracking of substructure. The code is prepackaged with yt,
+but also `separately available <http://code.google.com/p/rockstar>`_. The lead
+developer is Peter Behroozi, and the methods are described in `Behroozi
+et al. 2011 <http://rockstar.googlecode.com/files/rockstar_ap101911.pdf>`_.
+
+.. note:: At the moment, Rockstar does not support multiple particle masses,
+  instead using a fixed particle mass. This will not affect most dark matter
+  simulations, but does make it less useful for finding halos based on
+  stellar mass. In simulations where the highest-resolution particles all
+  have the same mass (i.e., zoom-in grid-based simulations), one can set up
+  a particle filter to select the lowest-mass particles and perform the halo
+  finding only on those, as sketched below.
+
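+A minimal sketch of such a filter follows (the filter name and the
+simple per-chunk mass cut are illustrative only; a fixed mass threshold
+is safer in practice):
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.data_objects.particle_filters import add_particle_filter
+
+  def lowres_dm(pfilter, data):
+      # Keep only the lowest-mass particles in each chunk of data.
+      return data["particle_mass"] <= data["particle_mass"].min()
+
+  add_particle_filter("lowres_dm", function=lowres_dm,
+                      filtered_type="all", requires=["particle_mass"])
+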
+To run Rockstar halo finding, you must launch python with MPI and
+parallelization enabled. While Rockstar itself does not require MPI to run,
+the MPI libraries allow yt to distribute particle information across multiple
+nodes.
+
+.. warning:: At the moment, running Rockstar inside of yt on multiple compute nodes
+   connected by an Infiniband network can be problematic. Therefore, for now
+   we recommend forcing the use of the non-Infiniband network (e.g. Ethernet)
+   using this flag: ``--mca btl ^openib``.
+   For example, here is how Rockstar might be called using 24 cores:
+   ``mpirun -n 24 --mca btl ^openib python ./run_rockstar.py --parallel``.
+
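+A minimal ``run_rockstar.py`` might look like the following sketch (the
+dataset path and the reader/writer counts are placeholders):
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+  data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+  hc = HaloCatalog(data_ds=data_ds, finder_method='rockstar',
+                   finder_kwargs={'num_readers': 1, 'num_writers': 22})
+  hc.create()
+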
+The script above configures the halo finder and launches a server process
+that disseminates run information and coordinates reader-writer processes.
+Afterwards, it launches reader and writer tasks, filling the available MPI
+slots, which alternately read particle information and analyze for halo
+content.
+
+The RockstarHaloFinder class has these options that can be supplied to the 
+halo catalog through the ``finder_kwargs`` argument:
+
+  * ``dm_type``, the index of the dark matter particle. Default is 1. 
+  * ``outbase``, the directory where the out*list files that Rockstar makes
+    should be placed. Default is 'rockstar_halos'.
+  * ``num_readers``, the number of reader tasks (which are idle most of the
+    time). Default is 1.
+  * ``num_writers``, the number of writer tasks (which are fed particles and
+    do most of the analysis). Default is MPI_TASKS-num_readers-1. 
+    If left undefined, the above options are automatically 
+    configured from the number of available MPI tasks.
+  * ``force_res``, the resolution that Rockstar uses for various calculations
+    and smoothing lengths. This is in units of Mpc/h.
+    If no value is provided, this parameter is automatically set to
+    the width of the smallest grid element in the simulation from the
+    last data snapshot (i.e. the one where time has evolved the
+    longest) in the time series:
+    ``ds_last.index.get_smallest_dx() * ds_last['mpch']``.
+  * ``total_particles``, if supplied, this is a pre-calculated
+    total number of dark matter
+    particles present in the simulation. For example, this is useful
+    when analyzing a series of snapshots where the number of dark
+    matter particles should not change and this will save some disk
+    access time. If left unspecified, it will
+    be calculated automatically. Default: ``None``.
+  * ``dm_only``, if set to ``True``, it will be assumed that there are
+    only dark matter particles present in the simulation.
+    This option does not modify the halos found by Rockstar, however
+    this option can save disk access time if there are no star particles
+    (or other non-dark matter particles) in the simulation. Default: ``False``.
+
+Rockstar dumps halo information in a series of text (halo*list and 
+out*list) and binary (halo*bin) files inside the ``outbase`` directory. 
+We use the halo list classes to recover the information. 
+
+Inside the ``outbase`` directory there is a text file named ``datasets.txt``
+that records the connection between ds names and the Rockstar file names.
+
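+For example, one of the binary outputs can be loaded back directly (the
+path below is a placeholder):
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+  halos_ds = load('rockstar_halos/halos_0.0.bin')
+  hc = HaloCatalog(halos_ds=halos_ds)
+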
+Parallel HOP and FOF
+--------------------
+
+Both the HOP and FoF halo finders can run in parallel using simple
+spatial decomposition. In order to run them in parallel, it is helpful
+to understand how this works. Below, the first plot (i) is a simplified
+depiction of three haloes, labeled 1, 2, and 3:
+
+.. image:: _images/ParallelHaloFinder.png
+   :width: 500
+
+Halo 3 is twice reflected around the periodic boundary conditions.
+
+In (ii), the volume has been sub-divided into four equal subregions,
+A, B, C, and D, shown with dotted lines. Notice that halo 2 is now in
+two different subregions, C and D, and that halo 3 is now in three:
+A, B, and D. If the halo finder is run on these four separate subregions,
+halo 1 will be identified as a single halo, but haloes 2 and 3 will be
+split up into multiple haloes, which is incorrect. The solution is to give
+each subregion padding to oversample into neighboring regions.
+
+In (iii), subregion C has oversampled into the other three regions, 
+with the periodic boundary conditions taken into account, shown by 
+dot-dashed lines. The other subregions oversample in a similar way.
+
+The halo finder is then run on each padded subregion independently 
+and simultaneously. By oversampling like this, haloes 2 and 3 will 
+both be enclosed fully in at least one subregion and identified 
+completely.
+
+Haloes identified with centers of mass inside the padded part of a 
+subregion are thrown out, eliminating the problem of halo duplication. 
+The centers for the three haloes are shown with stars. Halo 1 will
+belong to subregion A, halo 2 to C, and halo 3 to B.
+
+To run with parallel halo finding, you must supply a value for
+padding in the ``finder_kwargs`` argument. The ``padding`` parameter
+is in simulation units and defaults to 0.02. It sets how
+much padding is added to each of the six sides of a subregion.
+This value should be 2x-3x larger than the largest expected halo 
+in the simulation. It is unlikely, of course, that the largest 
+object in the simulation will be on a subregion boundary, but there 
+is no way of knowing before the halo finder is run.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_analysis.api import *
+  ds = load("data0001")
+  hc = HaloCatalog(data_ds=ds, finder_method='hop',
+                   finder_kwargs={'padding': 0.02})
+  # --or--
+  hc = HaloCatalog(data_ds=ds, finder_method='fof',
+                   finder_kwargs={'padding': 0.02})
+
+
+In general, a little bit of padding goes a long way, and too much 
+just slows down the analysis and doesn't improve the answer (but 
+doesn't change it).  It may be worth your time to run the parallel
+halo finder with a few padding values to find the right amount, especially
+if you're analyzing many similar datasets.
+
+Rockstar Installation
+=====================
+
+Rockstar is slightly patched and modified to run as a library inside of
+yt. By default it will be built with yt using the ``install_script.sh``.
+If it wasn't installed, please make sure that the installation setting
+``INST_ROCKSTAR=1`` is defined in the ``install_script.sh`` and re-run
+the installation script.

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -60,8 +60,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", num_sigma_bins=200,
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", num_sigma_bins=200,
   mass_column=5)
 
 Attached to ``hmf`` is the convenience function ``write_out``, which saves
@@ -102,8 +102,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", 
+  ds = load("data0030")
+  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4)
   hmf.write_out(prefix='hmf')

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_profiling.rst
--- a/doc/source/analyzing/analysis_modules/halo_profiling.rst
+++ /dev/null
@@ -1,451 +0,0 @@
-.. _halo_profiling:
-
-Halo Profiling
-==============
-.. sectionauthor:: Britton Smith <brittonsmith at gmail.com>,
-   Stephen Skory <s at skory.us>
-
-The ``HaloProfiler`` provides a means of performing analysis on multiple halos 
-in a parallel-safe way.
-
-The halo profiler performs three primary functions: radial profiles, 
-projections, and custom analysis.  See the cookbook for a recipe demonstrating 
-all of these features.
-
-Configuring the Halo Profiler
------------------------------
-
-The only argument required to create a ``HaloProfiler`` object is the path 
-to the dataset.
-
-.. code-block:: python
-
-  from yt.analysis_modules.halo_profiler.api import *
-  hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046")
-
-Most of the halo profiler's options are configured with additional keyword 
-arguments:
-
- * **output_dir** (*str*): if specified, all output will be put into this path
-   instead of in the dataset directories.  Default: None.
-
- * **halos** (*str*): "multiple" for profiling more than one halo.  In this mode
-   halos are read in from a list or identified with a
-   `halo finder <../cookbook/running_halofinder.html>`_.  In "single" mode, the
-   one and only halo center is identified automatically as the location of the
-   peak in the density field.  Default: "multiple".
-
- * **halo_list_file** (*str*): name of file containing the list of halos.
-   The halo profiler will look for this file in the data directory.
-   Default: "HopAnalysis.out".
-
- * **halo_list_format** (*str* or *dict*): the format of the halo list file.
-   "yt_hop" for the format given by yt's halo finders.  "enzo_hop" for the
-   format written by enzo_hop.  This keyword can also be given in the form of a
-   dictionary specifying the column in which various properties can be found.
-   For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
-   Default: "yt_hop".
-
- * **halo_finder_function** (*function*): If halos is set to multiple and the
-   file given by halo_list_file does not exist, the halo finding function
-   specified here will be called.  Default: HaloFinder (yt_hop).
-
- * **halo_finder_args** (*tuple*): args given with call to halo finder function.
-   Default: None.
-
- * **halo_finder_kwargs** (*dict*): kwargs given with call to halo finder
-   function. Default: None.
-
- * **recenter** (*string* or function name): The name of a function
-   that will be used to move the center of the halo for the purposes of
-   analysis. See explanation and examples, below. Default: None, which
-   is equivalent to the center of mass of the halo as output by the halo
-   finder.
-
- * **halo_radius** (*float*): if no halo radii are provided in the halo list
-   file, this parameter is used to specify the radius out to which radial
-   profiles will be made.  This keyword is also used when halos is set to
-   single.  Default: 0.1.
-
- * **radius_units** (*str*): the units of **halo_radius**. 
-   Default: "1" (code units).
-
- * **n_profile_bins** (*int*): the number of bins in the radial profiles.
-   Default: 50.
-
- * **profile_output_dir** (*str*): the subdirectory, inside the data directory,
-   in which radial profile output files will be created.  The directory will be
-   created if it does not exist.  Default: "radial_profiles".
-
- * **projection_output_dir** (*str*): the subdirectory, inside the data
-   directory, in which projection output files will be created.  The directory
-   will be created if it does not exist.  Default: "projections".
-
- * **projection_width** (*float*): the width of halo projections.
-   Default: 8.0.
-
- * **projection_width_units** (*str*): the units of projection_width.
-   Default: "mpc".
-
- * **project_at_level** (*int* or "max"): the maximum refinement level to be
-   included in projections.  Default: "max" (maximum level within the dataset).
-
- * **velocity_center** (*list*): the method in which the halo bulk velocity is
-   calculated (used for the calculation of radial and tangential velocities).  Valid
-   options are:
-   - ["bulk", "halo"] (Default): the velocity provided in the halo list
-   - ["bulk", "sphere"]: the bulk velocity of the sphere centered on the halo center.
-   - ["max", field]: the velocity of the cell that is the location of the maximum of the field specified.
-
- * **filter_quantities** (*list*): quantities from the original halo list
-   file to be written out in the filtered list file.  Default: ['id','center'].
-
- * **use_critical_density** (*bool*): if True, the definition of overdensity
-   for virial quantities is calculated with respect to the critical
-   density.  If False, overdensity is with respect to mean matter density,
-   which is lower by a factor of Omega_M.  Default: False.
-
-Profiles
---------
-
-Once the halo profiler object has been instantiated, fields can be added for 
-profiling with the :meth:`add_profile` method:
-
-.. code-block:: python
-
-  hp.add_profile('cell_volume', weight_field=None, accumulation=True)
-  hp.add_profile('TotalMassMsun', weight_field=None, accumulation=True)
-  hp.add_profile('density', weight_field=None, accumulation=False)
-  hp.add_profile('temperature', weight_field='cell_mass', accumulation=False)
-  hp.make_profiles(njobs=-1, prefilters=["halo['mass'] > 1e13"],
-                   filename='VirialQuantities.h5')
-
-The :meth:`make_profiles` method will begin the profiling.  Use the
-**njobs** keyword to control the number of jobs over which the
-profiling is divided.  Setting to -1 results in a single processor per
-halo.  Setting to 1 results in all available processors working on the
-same halo.  The prefilters keyword tells the profiler to skip all halos with 
-masses (as loaded from the halo finder) less than a given amount.  See below 
-for more information.  Additional keyword arguments are:
-
- * **filename** (*str*): If set, a file will be written with all of the 
-   filtered halos and the quantities returned by the filter functions.
-   Default: None.
-
- * **prefilters** (*list*): A single dataset can contain thousands or tens of 
-   thousands of halos. Significant time can be saved by not profiling halos
-   that are certain to not pass any filter functions in place.  Simple filters 
-   based on quantities provided in the initial halo list can be used to filter 
-   out unwanted halos using this parameter.  Default: None.
-
- * **njobs** (*int*): The number of jobs over which to split the profiling.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
- * **profile_format** (*str*): The file format for the radial profiles, 
-   'ascii' or 'hdf5'.  Default: 'ascii'.
-
-.. image:: _images/profiles.png
-   :width: 500
-
-Radial profiles of Overdensity (left) and Temperature (right) for five halos.
-
-Projections
------------
-
-The process of making projections is similar to that of profiles:
-
-.. code-block:: python
-
-  hp.add_projection('density', weight_field=None)
-  hp.add_projection('temperature', weight_field='density')
-  hp.add_projection('metallicity', weight_field='density')
-  hp.make_projections(axes=[0, 1, 2], save_cube=True, save_images=True, 
-                      halo_list="filtered", njobs=-1)
-
-If **save_cube** is set to True, the projection data
-will be written to a set of hdf5 files 
-in the directory given by **projection_output_dir**. 
-The keyword, **halo_list**, can be 
-used to select between the full list of halos ("all"),
-the filtered list ("filtered"), or 
-an entirely new list given in the form of a file name.
-See :ref:`filter_functions` for a 
-discussion of filtering halos.  Use the **njobs** keyword to control
-the number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.  The keyword arguments are:
-
- * **axes** (*list*): A list of the axes to project along, using the usual 
-   0,1,2 convention. Default=[0,1,2].
-
- * **halo_list** (*str*) {'filtered', 'all'}: Which set of halos to make 
-   profiles of, either ones passed by the halo filters (if enabled/added), or 
-   all halos.  Default='filtered'.
-
- * **save_images** (*bool*): Whether or not to save images of the projections. 
-   Default=False.
-
- * **save_cube** (*bool*): Whether or not to save the HDF5 files of the halo 
-   projections.  Default=True.
-
- * **njobs** (*int*): The number of jobs over which to split the projections.  
-   Set to -1 so that each halo is done by a single processor.  Default: -1.
-
- * **dynamic** (*bool*): If True, distribute halos using a task queue.  If 
-   False, distribute halos evenly over all jobs.  Default: False.
-
-.. image:: _images/projections.png
-   :width: 500
-
-Projections of Density (top) and Temperature,
-weighted by Density (bottom), in the x (left), 
-y (middle), and z (right) directions for a single halo with a width of 8 Mpc.
-
-Halo Filters
-------------
-
-Filters can be added to create a refined list of
-halos based on their profiles or to avoid 
-profiling halos altogether based on information
-given in the halo list file.
-
-.. _filter_functions:
-
-Filter Functions
-^^^^^^^^^^^^^^^^
-
-It is often the case that one is looking to
-identify halos with a specific set of 
-properties.  This can be accomplished through the creation
-of filter functions.  A filter 
-function can take as many args and kwargs as you like,
-as long as the first argument is a 
-profile object, or at least a dictionary which contains
-the profile arrays for each field.  
-Filter functions must return a list of two things.
-The first is a True or False indicating 
-whether the halo passed the filter. 
-The second is a dictionary containing quantities 
-calculated for that halo that will be written to a
-file if the halo passes the filter.
-A sample filter function based on virial quantities can be found in 
-``yt/analysis_modules/halo_profiler/halo_filters.py``.
-
-Halo filtering takes place during the call to :meth:`make_profiles`.
-The :meth:`add_halo_filter` method is used to add a filter to be used
-during the profiling:
-
-.. code-block:: python
-
-  hp.add_halo_filter(HP.VirialFilter, must_be_virialized=True,
-                     overdensity_field='ActualOverdensity',
-                     virial_overdensity=200,
-                     virial_filters=[['TotalMassMsun','>=','1e14']],
-                     virial_quantities=['TotalMassMsun','RadiusMpc'],
-                     use_log=True)
-
-The addition above will calculate and return virial quantities,
-mass and radius, for an 
-overdensity of 200.  In order to pass the filter, at least one
-point in the profile must be 
-above the specified overdensity and the virial mass must be at
-least 1e14 solar masses.  The **use_log** keyword indicates that interpolation 
-should be done in log space.  If 
-the VirialFilter function has been added to the filter list,
-the halo profiler will make 
-sure that the fields necessary for calculating virial quantities are added.
-As many filters as desired can be added.  If filters have been added,
-the next call to :meth:`make_profiles` will filter by all of
-the added filter functions:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out")
-
-If the **filename** keyword is set, a file will be written with all of the 
-filtered halos and the quantities returned by the filter functions.
-
-.. note:: If the profiles have already been run, the halo profiler will read
-   in the previously created output files instead of re-running the profiles.
-   The halo profiler will check to make sure the output file contains all of
-   the requested halo fields.  If not, the profile will be made again from
-   scratch.
-
-.. _halo_profiler_pre_filters:
-
-Pre-filters
-^^^^^^^^^^^
-
-A single dataset can contain thousands or tens of thousands of halos.
-Significant time can 
-be saved by not profiling halos that are certain to not pass any filter
-functions in place.  
-Simple filters based on quantities provided in the initial halo list
-can be used to filter 
-out unwanted halos using the **prefilters** keyword:
-
-.. code-block:: python
-
-  hp.make_profiles(filename="FilteredQuantities.out",
-                   prefilters=["halo['mass'] > 1e13"])
-
-Arguments provided with the **prefilters** keyword should be given
-as a list of strings.  
-Each string in the list will be evaluated with an *eval*.
-
-.. note:: If a VirialFilter function has been added with a filter based
-   on mass (as in the example above), a prefilter will be automatically
-   added to filter out halos with masses greater or less than (depending
-   on the conditional of the filter) a factor of ten of the specified
-   virial mass.
-
-Recentering the Halo For Analysis
----------------------------------
-
-It is possible to move the center of the halo to a new point using an
-arbitrary function for making profiles.
-By default, the center is provided by the halo finder,
-which outputs the center of mass of the particles. For the purposes of
-analysis, it may be important to recenter onto a gas density maximum,
-or a temperature minimum.
-
-There are a number of built-in functions to do this, listed below.
-Each of the functions uses mass-weighted fields for the calculations
-of new center points.
-To use
-them, supply the HaloProfiler with the ``recenter`` option and 
-the name of the function, as in the example below.
-
-.. code-block:: python
-
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter="Max_Dark_Matter_Density")
-
-Additional options are:
-
-  * *Min_Dark_Matter_Density* - Recenter on the point of minimum dark matter
-    density in the halo.
-
-  * *Max_Dark_Matter_Density* - Recenter on the point of maximum dark matter
-    density in the halo.
-
-  * *CoM_Dark_Matter_Density* - Recenter on the center of mass of the dark
-    matter density field. This will be very similar to what the halo finder
-    provides, but not precisely similar.
-
-  * *Min_Gas_Density* - Recenter on the point of minimum gas density in the
-    halo.
-
-  * *Max_Gas_Density* - Recenter on the point of maximum gas density in the
-    halo.
-
-  * *CoM_Gas_Density* - Recenter on the center of mass of the gas density field
-    in the halo.
-
-  * *Min_Total_Density* - Recenter on the point of minimum total (gas + dark
-    matter) density in the halo.
-
-  * *Max_Total_Density* - Recenter on the point of maximum total density in the
-    halo.
-
-  * *CoM_Total_Density* - Recenter on the center of mass for the total density
-    in the halo.
-
-  * *Min_Temperature* - Recenter on the point of minimum temperature in the
-    halo.
-
-  * *Max_Temperature* - Recenter on the point of maximum temperature in the
-    halo.
-
-It is also possible to supply a user-defined function to the HaloProfiler.
-This can be used if the pre-defined functions above are not sufficient.
-The function takes a single argument, a data container for the halo,
-which is a sphere. The function returns a 3-list with the new center.
-
-In this example below, a function is used such that the halos will be
-re-centered on the point of absolute minimum temperature, that is not
-mass weighted.
-
-.. code-block:: python
-
-   from yt.mods import *
-   
-   def find_min_temp(sphere):
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", recenter=find_min_temp)
-
-It is possible to make more complicated functions. This example below extends
-the example above to include a distance control that prevents the center from
-being moved too far. If the recenter moves too far, ``[-1, -1, -1]`` is
-returned which will prevent the halo from being profiled.
-Any triplet of values less than the ``domain_left_edge`` will suffice.
-There will be a note made in the output (stderr) showing which halos were
-skipped.
-
-.. code-block:: python
-
-   from yt.mods import *
-   from yt.utilities.math_utils import periodic_dist
-   
-   def find_min_temp_dist(sphere):
-       old = sphere.center
-       ma, mini, mx, my, mz, mg = sphere.quantities['MinLocation']('temperature')
-       d = sphere.pf['kpc'] * periodic_dist(old, [mx, my, mz],
-           sphere.pf.domain_right_edge - sphere.pf.domain_left_edge)
-       # If new center farther than 5 kpc away, don't recenter
-       if d > 5.: return [-1, -1, -1]
-       return [mx,my,mz]
-   
-   hp = HaloProfiler("enzo_tiny_cosmology/DD0046/DD0046", 
-                     recenter=find_min_temp_dist)
-
-Custom Halo Analysis
---------------------
-
-Besides radial profiles and projections, the halo profiler has the
-ability to run custom analysis functions on each halo.  Custom halo
-analysis functions take two arguments: a halo dictionary containing
-the id, center, etc; and a sphere object.  The example function shown
-below creates a 2D profile of the total mass in bins of density and
-temperature for a given halo.
-
-.. code-block:: python
-
-   from yt.mods import *
-   from yt.data_objects.profiles import BinnedProfile2D
-
-   def halo_2D_profile(halo, sphere):
-       "Make a 2D profile for a halo."
-       my_profile = BinnedProfile2D(sphere,
-             128, 'density', 1e-30, 1e-24, True,
-             128, 'temperature', 1e2, 1e7, True,
-             end_collect=False)
-       my_profile.add_fields('cell_mass', weight=None, fractional=False)
-       my_filename = os.path.join(sphere.pf.fullpath, '2D_profiles', 
-             'Halo_%04d.h5' % halo['id'])
-       my_profile.write_out_h5(my_filename)
-
-Using the :meth:`analyze_halo_spheres` function, the halo profiler
-will create a sphere centered on each halo, and perform the analysis
-from the custom routine.
-
-.. code-block:: python
-
-    hp.analyze_halo_spheres(halo_2D_profile, halo_list='filtered',
-                           analysis_output_dir='2D_profiles', 
-                           njobs=-1, dynamic=False)
-
-Just like with the :meth:`make_projections` function, the keyword,
-**halo_list**, can be used to select between the full list of halos
-("all"), the filtered list ("filtered"), or an entirely new list given
-in the form of a file name.  If the **analysis_output_dir** keyword is
-set, the halo profiler will make sure the desired directory exists in
-a parallel-safe manner.  Use the **njobs** keyword to control the
-number of jobs over which the profiling is divided.  Setting to -1
-results in a single processor per halo.  Setting to 1 results in all
-available processors working on the same halo.

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/halo_transition.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -0,0 +1,106 @@
+
+Getting up to Speed with Halo Analysis in yt-3.0
+================================================
+
+If you're used to halo analysis in yt-2.x, here's a guide to
+updating your analysis pipeline to take advantage of
+the new halo catalog infrastructure.
+
+Finding Halos
+-------------
+
+Previously, halos were found using calls to ``HaloFinder``, 
+``FOFHaloFinder`` and ``RockstarHaloFinder``. Now it is 
+encouraged that you find the halos upon creation of the halo catalog 
+by supplying a value to the ``finder_method`` keyword when calling
+``HaloCatalog``. Currently, only halos found using Rockstar or a
+previous instance of a halo catalog can be loaded
+using the ``halos_ds`` keyword.
+
+To pass additional arguments to the halo finders 
+themselves, supply a dictionary to ``finder_kwargs`` where
+each key in the dictionary is a keyword of the halo finder
+and the corresponding value is the value to be passed for
+that keyword.
+
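+For example (the ``padding`` value here is purely illustrative):
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='fof',
+                    finder_kwargs={'padding': 0.02})
+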
+Getting Halo Information
+------------------------
+
+All quantities that used to be present in a ``halo_list`` can
+still be retrieved, but they are not necessarily included by default.
+Every halo will by default have the following properties:
+
+* particle_position_i (where i can be x,y,z)
+* particle_mass
+* virial_radius
+* particle_identifier
+
+If other quantities are desired, they can be included by adding
+the corresponding quantity before the catalog is created. See
+the full halo catalog documentation for further information about
+how to add these quantities and what quantities are available.
+
+You no longer have to iterate over halos in the ``halo_list``.
+Now a halo dataset can be treated as a regular dataset and 
+all quantities are available by accessing ``all_data``.
+Specifically, all quantities can be accessed as shown:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+   ad = hc.all_data()
+   masses = ad['particle_mass'][:]
+
+
+Prefiltering Halos
+------------------
+
+Prefiltering halos before analysis takes place is now done
+by adding a filter before the call to ``create``. An example
+is shown below:
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
+   hc.create()
+
+Profiling Halos
+---------------
+
+The halo profiler available in yt-2.x has been removed, and
+profiling functionality is now completely contained within the
+halo catalog. A complete example of how to profile halos by 
+radius using the new infrastructure is given in 
+:ref:`halo_analysis_example`. 
+
+Plotting Halos
+--------------
+
+Annotating halo locations onto a slice or projection works in 
+the same way as in yt-2.x, but now a halo catalog must be
+passed to the annotate halo call rather than a halo list.
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
+   hc.create()
+
+   prj = ProjectionPlot(data_ds, 'z', 'density')
+   prj.annotate_halos(hc)
+   prj.save()
+
+Written Data
+------------
+
+Data is now written out in the form of HDF5 (``.h5``) files rather than
+text files. The directory to which they are written is
+controlled by the keyword ``output_dir``. Each quantity
+is a field in the file.
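+
+As a quick sketch, a previously written catalog can be loaded back as a
+regular dataset (the path below is a placeholder):
+
+.. code-block:: python
+
+   from yt.mods import *
+
+   hds = load("halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   ad = hds.all_data()
+   masses = ad['particle_mass']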

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ b/doc/source/analyzing/analysis_modules/hmf_howto.rst
@@ -27,8 +27,8 @@
 .. code-block:: python
 
   from yt.mods import *
-  pf = load("data0001")
-  halo_list = HaloFinder(pf)
+  ds = load("data0001")
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
 
 The only important columns of data in the text file ``HopAnalysis.out``
@@ -79,8 +79,8 @@
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0001")
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  ds = load("data0001")
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')
@@ -107,9 +107,9 @@
   from yt.analysis_modules.halo_mass_function.api import *
   
   # If desired, start loop here.
-  pf = load("data0001")
+  ds = load("data0001")
   
-  halo_list = HaloFinder(pf)
+  halo_list = HaloFinder(ds)
   halo_list.write_out("HopAnalysis.out")
   
   hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
@@ -120,7 +120,7 @@
                 virial_quantities=['TotalMassMsun','RadiusMpc'])
   hp.make_profiles(filename="VirialHaloes.out")
   
-  hmf = HaloMassFcn(pf, halo_file="VirialHaloes.out", 
+  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
   sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
   fitting_function=4, mass_column=5, num_sigma_bins=200)
   hmf.write_out(prefix='hmf')

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/light_cone_generator.rst
--- a/doc/source/analyzing/analysis_modules/light_cone_generator.rst
+++ b/doc/source/analyzing/analysis_modules/light_cone_generator.rst
@@ -60,7 +60,7 @@
    when gathering datasets for time series.  Default: True.
 
  * **set_parameters** (*dict*): Dictionary of parameters to attach to 
-   pf.parameters.  Default: None.
+   ds.parameters.  Default: None.
 
  * **output_dir** (*string*): The directory in which images and data files
     will be written.  Default: 'LC'.

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/merger_tree.rst
--- a/doc/source/analyzing/analysis_modules/merger_tree.rst
+++ b/doc/source/analyzing/analysis_modules/merger_tree.rst
@@ -2,8 +2,9 @@
 
 Halo Merger Tree
 ================
-.. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
-.. versionadded:: 1.7
+
+.. note:: At the moment, the merger tree is not yet implemented using the
+    new halo catalog functionality.
 
 The Halo Merger Tree extension is capable of building a database of halo mergers
 over a set of time-ordered Enzo datasets. The fractional contribution of older

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -48,7 +48,7 @@
 
 .. code:: python
 
-    pf = load("MHDSloshing/virgo_low_res.0054.vtk",
+    ds = load("MHDSloshing/virgo_low_res.0054.vtk",
               parameters={"time_unit":(1.0,"Myr"),
                           "length_unit":(1.0,"Mpc"),
                           "mass_unit":(1.0e14,"Msun")}) 
@@ -423,7 +423,7 @@
 evacuated two "bubbles" of radius 30 kpc at a distance of 50 kpc from
 the center. 
 
-Now, we create a parameter file out of this dataset:
+Now, we create a yt Dataset object from this dataset:
 
 .. code:: python
 
@@ -445,7 +445,7 @@
 
 .. code:: python
 
-   sphere = ds.sphere(pf.domain_center, (1.0,"Mpc"))
+   sphere = ds.sphere(ds.domain_center, (1.0,"Mpc"))
        
    A = 6000.
    exp_time = 2.0e5

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ b/doc/source/analyzing/analysis_modules/radial_column_density.rst
@@ -41,15 +41,15 @@
 
   from yt.mods import *
   from yt.analysis_modules.radial_column_density.api import *
-  pf = load("data0030")
+  ds = load("data0030")
   
-  rcdnumdens = RadialColumnDensity(pf, 'NumberDensity', [0.5, 0.5, 0.5],
+  rcdnumdens = RadialColumnDensity(ds, 'NumberDensity', [0.5, 0.5, 0.5],
     max_radius = 0.5)
   def _RCDNumberDensity(field, data, rcd = rcdnumdens):
       return rcd._build_derived_field(data)
   add_field('RCDNumberDensity', _RCDNumberDensity, units=r'1/\rm{cm}^2')
   
-  dd = pf.h.all_data()
+  dd = ds.all_data()
   print dd['RCDNumberDensity']
 
 The field ``RCDNumberDensity`` can be used just like any other derived field

diff -r e1b78cda9ec7254e8436bbe34be8ef12fa1178f3 -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -41,8 +41,8 @@
 
 .. code-block:: python
 
-    pf = load("galaxy0030/galaxy0030")
-    writer = RadMC3DWriter(pf)
+    ds = load("galaxy0030/galaxy0030")
+    writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
     writer.write_dust_file("DustDensity", "dust_density.inp")
@@ -87,8 +87,8 @@
         return (x_co/mu_h)*data["density"]
     add_field("NumberDensityCO", function=_NumberDensityCO)
     
-    pf = load("galaxy0030/galaxy0030")
-    writer = RadMC3DWriter(pf)
+    ds = load("galaxy0030/galaxy0030")
+    writer = RadMC3DWriter(ds)
     
     writer.write_amr_grid()
     writer.write_line_file("NumberDensityCO", "numberdens_co.inp")

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a64caf0d74e7/
Changeset:   a64caf0d74e7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 16:02:55
Summary:     Merging again
Affected #:  4 files

diff -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -15,6 +15,7 @@
 
 import copy
 import numpy as np
+import uuid
 
 from .clump_info_items import \
      clump_info_registry
@@ -22,6 +23,25 @@
 from .contour_finder import \
      identify_contours
 
+from yt.fields.derived_field import \
+    ValidateSpatial
+
+def add_contour_field(ds, contour_key):
+    def _contours(field, data):
+        fd = data.get_field_parameter("contour_slices_%s" % contour_key)
+        vals = data["index", "ones"] * -1
+        if fd is None or fd == 0.0:
+            return vals
+        for sl, v in fd.get(data.id, []):
+            vals[sl] = v
+        return vals
+
+    ds.add_field(("index", "contours_%s" % contour_key),
+                 function=_contours,
+                 validators=[ValidateSpatial(0)],
+                 take_log=False,
+                 display_field=False)
+
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
@@ -103,11 +123,14 @@
         for sl_list in cids.values():
             for sl, ff in sl_list:
                 unique_contours.update(np.unique(ff))
+        contour_key = uuid.uuid4().hex
+        base_object = getattr(self.data, 'base_object', self.data)
+        add_contour_field(base_object.pf, contour_key)
         for cid in sorted(unique_contours):
             if cid == -1: continue
-            new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid)],
-                    {'contour_slices': cids})
+            new_clump = base_object.cut_region(
+                    ["obj['contours_%s'] == %s" % (contour_key, cid)],
+                    {('contour_slices_%s' % contour_key): cids})
             if new_clump["ones"].size == 0:
                 # This is to skip possibly duplicate clumps.  Using "ones" here
                 # will speed things up.

diff -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -51,6 +51,8 @@
             LE, RE, dims.astype("int64"))
         contours[nid] = (g.Level, node.node_ind, pg, sl)
     node_ids = np.array(node_ids)
+    if node_ids.size == 0:
+        return 0, {}
     trunk = data_source.tiles.tree.trunk
     mylog.info("Linking node (%s) contours.", len(contours))
     link_node_contours(trunk, contours, tree, node_ids)

diff -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -722,9 +722,17 @@
     def blocks(self):
         # We have to take a slightly different approach here.  Note that all
         # that .blocks has to yield is a 3D array and a mask.
-        for b, m in self.base_object.blocks:
-            m[~self._cond_ind] = 0
-            yield b, m
+        for obj, m in self.base_object.blocks:
+            m = m.copy()
+            with obj._field_parameter_state(self.field_parameters):
+                for cond in self.conditionals:
+                    ss = eval(cond)
+                    m = np.logical_and(m, ss, m)
+            if not np.any(m): continue
+            yield obj, m
+
+    def cut_region(self, *args, **kwargs):
+        raise NotImplementedError
 
     @property
     def _cond_ind(self):

diff -r b3eb46662e7684f5d5e492efa7d953cbebd7fda9 -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -207,18 +207,3 @@
              units="cm",
              display_field=False)
 
-    def _contours(field, data):
-        fd = data.get_field_parameter("contour_slices")
-        vals = data["index", "ones"] * -1
-        if fd is None or fd == 0.0:
-            return vals
-        for sl, v in fd.get(data.id, []):
-            vals[sl] = v
-        return vals
-    
-    registry.add_field(("index", "contours"),
-                       function=_contours,
-                       validators=[ValidateSpatial(0)],
-                       take_log=False,
-                       display_field=False)
-


https://bitbucket.org/yt_analysis/yt/commits/0aae7725a9f2/
Changeset:   0aae7725a9f2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 18:51:30
Summary:     Wiping out counts for mesh contour joining.
Affected #:  1 file

diff -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf -r 0aae7725a9f255d005bb08c6b45a631f0cfa0a36 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -228,7 +228,7 @@
         cdef int i, n, ins
         cdef np.int64_t cid1, cid2
         # Okay, this requires lots of iteration, unfortunately
-        cdef ContourID *cur, *root
+        cdef ContourID *cur, *c1, *c2
         n = join_tree.shape[0]
         #print "Counting"
         #print "Checking", self.count()
@@ -253,6 +253,7 @@
                 print "  Inspected ", ins
                 raise RuntimeError
             else:
+                c1.count = c2.count = 0
                 contour_union(c1, c2)
 
     def count(self):
@@ -335,6 +336,7 @@
                                 c2 = container[offset]
                                 if c2 == NULL: continue
                                 c2 = contour_find(c2)
+                                cur.count = c2.count = 0
                                 contour_union(cur, c2)
                                 cur = contour_find(cur)
         for i in range(ni):
@@ -520,97 +522,6 @@
     new_joins = tree.cull_joins(joins[:ti,:])
     tree.add_joins(new_joins)
 
-cdef inline int are_neighbors(
-            np.float64_t x1, np.float64_t y1, np.float64_t z1,
-            np.float64_t dx1, np.float64_t dy1, np.float64_t dz1,
-            np.float64_t x2, np.float64_t y2, np.float64_t z2,
-            np.float64_t dx2, np.float64_t dy2, np.float64_t dz2,
-        ):
-    # We assume an epsilon of 1e-15
-    if fabs(x1-x2) > 0.5*(dx1+dx2): return 0
-    if fabs(y1-y2) > 0.5*(dy1+dy2): return 0
-    if fabs(z1-z2) > 0.5*(dz1+dz2): return 0
-    return 1
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
-def identify_field_neighbors(
-            np.ndarray[dtype=np.float64_t, ndim=1] field,
-            np.ndarray[dtype=np.float64_t, ndim=1] x,
-            np.ndarray[dtype=np.float64_t, ndim=1] y,
-            np.ndarray[dtype=np.float64_t, ndim=1] z,
-            np.ndarray[dtype=np.float64_t, ndim=1] dx,
-            np.ndarray[dtype=np.float64_t, ndim=1] dy,
-            np.ndarray[dtype=np.float64_t, ndim=1] dz,
-        ):
-    # We assume this field is pre-jittered; it has no identical values.
-    cdef int outer, inner, N, added
-    cdef np.float64_t x1, y1, z1, dx1, dy1, dz1
-    N = field.shape[0]
-    #cdef np.ndarray[dtype=np.object_t] joins
-    joins = [[] for outer in range(N)]
-    #joins = np.empty(N, dtype='object')
-    for outer in range(N):
-        if (outer % 10000) == 0: print outer, N
-        x1 = x[outer]
-        y1 = y[outer]
-        z1 = z[outer]
-        dx1 = dx[outer]
-        dy1 = dy[outer]
-        dz1 = dz[outer]
-        this_joins = joins[outer]
-        added = 0
-        # Go in reverse order
-        for inner in range(outer, 0, -1):
-            if not are_neighbors(x1, y1, z1, dx1, dy1, dz1,
-                                 x[inner], y[inner], z[inner],
-                                 dx[inner], dy[inner], dz[inner]):
-                continue
-            # Hot dog, we have a weiner!
-            this_joins.append(inner)
-            added += 1
-            if added == 26: break
-    return joins
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
-def extract_identified_contours(int max_ind, joins):
-    cdef int i
-    contours = []
-    for i in range(max_ind + 1): # +1 to get to the max_ind itself
-        contours.append(set([i]))
-        if len(joins[i]) == 0:
-            continue
-        proto_contour = [i]
-        for j in joins[i]:
-            proto_contour += contours[j]
-        proto_contour = set(proto_contour)
-        for j in proto_contour:
-            contours[j] = proto_contour
-    return contours
-
- at cython.boundscheck(False)
- at cython.wraparound(False)
-def update_flat_joins(np.ndarray[np.int64_t, ndim=2] joins,
-                 np.ndarray[np.int64_t, ndim=1] contour_ids,
-                 np.ndarray[np.int64_t, ndim=1] final_joins):
-    cdef np.int64_t new, old
-    cdef int i, j, nj, nf, counter
-    cdef int ci, cj, ck
-    nj = joins.shape[0]
-    nf = final_joins.shape[0]
-    for ci in range(contour_ids.shape[0]):
-        if contour_ids[ci] == -1: continue
-        for j in range(nj):
-            if contour_ids[ci] == joins[j,0]:
-                contour_ids[ci] = joins[j,1]
-                break
-        for j in range(nf):
-            if contour_ids[ci] == final_joins[j]:
-                contour_ids[ci] = j + 1
-                break
-
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def update_joins(np.ndarray[np.int64_t, ndim=2] joins,


https://bitbucket.org/yt_analysis/yt/commits/4dba8041c4d9/
Changeset:   4dba8041c4d9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-21 20:33:26
Summary:     Allow tie-breaking for examined nodes.
Affected #:  1 file

diff -r 0aae7725a9f255d005bb08c6b45a631f0cfa0a36 -r 4dba8041c4d9bb269912e04d648ff0232d4cf22e yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -344,7 +344,6 @@
                 for k in range(nk):
                     c1 = container[i*nj*nk + j*nk + k]
                     if c1 == NULL: continue
-                    cur = c1
                     c1 = contour_find(c1)
                     contour_ids[i,j,k] = c1.contour_id
         
@@ -413,73 +412,94 @@
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
+                if i == 0 and oi == -1: continue
+                if i == ny - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
+                    if j == 0 and oj == -1: continue
+                    if j == nz - 1 and oj == 1: continue
                     # Adjust by -1 in x, then oi and oj in y and z
                     get_spos(vc0, -1, i + oi, j + oj, 0, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, 0, i, j)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
+                        # If the other node has been examined, then we assume
+                        # it has already resolved the conflict.
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
                             ti += 1
                     # This is outside our vc
                     get_spos(vc0, nx, i + oi, j + oj, 0, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, nx - 1, i, j)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
     # Now y-pass
     for i in range(nx):
         for j in range(nz):
             for offset_i in range(3):
                 oi = offset_i - 1
+                if i == 0 and oi == -1: continue
+                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
+                    if j == 0 and oj == -1: continue
+                    if j == nz - 1 and oj == 1: continue
                     get_spos(vc0, i + oi, -1, j + oj, 1, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, i, 0, j)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
                             ti += 1
 
                     get_spos(vc0, i + oi, ny, j + oj, 1, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, i, ny - 1, j)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
                             ti += 1
 
     # Now z-pass
@@ -487,36 +507,46 @@
         for j in range(ny):
             for offset_i in range(3):
                 oi = offset_i - 1
+                if i == 0 and oi == -1: continue
+                if i == nx - 1 and oi == 1: continue
                 for offset_j in range(3):
                     oj = offset_j - 1
+                    if j == 0 and oj == -1: continue
+                    if j == ny - 1 and oj == 1: continue
                     get_spos(vc0, i + oi,  j + oj, -1, 2, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, i, j, 0)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
                             ti += 1
 
                     get_spos(vc0, i + oi, j + oj, nz, 2, spos)
                     adj_node = _find_node(trunk, spos)
                     vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
+                    if spos_contained(vc1, spos):
                         # This is outside our VC, as 0 is a boundary layer
                         index = vc_index(vc0, i, j, nz - 1)
                         c1 = (<np.int64_t*>vc0.data[0])[index]
                         index = vc_pos_index(vc1, spos)
                         c2 = (<np.int64_t*>vc1.data[0])[index]
                         if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
+                            if examined[adj_node.node_ind] == 0:
+                                joins[ti,0] = i64max(c1,c2)
+                                joins[ti,1] = i64min(c1,c2)
+                            else:
+                                joins[ti,0] = c1
+                                joins[ti,1] = c2
                             ti += 1
     if ti == 0: return
     new_joins = tree.cull_joins(joins[:ti,:])
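
To make the join-recording rule above easier to follow, here is a minimal
plain-Python sketch of the logic (record_join and its arguments are
hypothetical names, not part of the Cython module):

    def record_join(joins, c1, c2, neighbor_examined):
        # Both cells must carry real contour IDs (-1 marks "no contour").
        if c1 > -1 and c2 > -1:
            if not neighbor_examined:
                # Neither side has resolved this yet; store a normalized
                # (max, min) pair so duplicate joins cull cleanly.
                joins.append((max(c1, c2), min(c1, c2)))
            else:
                # The adjacent node was already examined, so we assume it
                # has resolved the conflict; keep its ordering as-is.
                joins.append((c1, c2))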


https://bitbucket.org/yt_analysis/yt/commits/c38c4035fc5f/
Changeset:   c38c4035fc5f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-21 16:36:08
Summary:     Converting validity functions from string evals to a callback system and updating the recipe.
Affected #:  4 files

diff -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf -r c38c4035fc5f9d2467f196b4bcd2c0d5d654f142 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -22,11 +22,12 @@
 c_min = 10**np.floor(np.log10(data_source[field]).min()  )
 c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
-# keep only clumps with at least 20 cells
-function = 'self.data[\'%s\'].size > 20' % field
+# Now we get our 'base' clump -- this one just covers the whole domain.
+master_clump = Clump(data_source, None, field)
 
-# Now find get our 'base' clump -- this one just covers the whole domain.
-master_clump = Clump(data_source, None, field, function=function)
+# Add a "validator" to weed out clumps with less than 20 cells.
+# As many validators can be added as you want.
+master_clump.add_validator("min_cells", 20)
 
 # This next command accepts our base clump and we say the range between which
 # we want to contour.  It recursively finds clumps within the master clump, at

diff -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf -r c38c4035fc5f9d2467f196b4bcd2c0d5d654f142 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -25,6 +25,9 @@
 
 from .clump_info_items import \
     add_clump_info
+
+from .clump_validators import \
+    add_validator
     
 from .clump_tools import \
     recursive_all_clumps, \

diff -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf -r c38c4035fc5f9d2467f196b4bcd2c0d5d654f142 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -19,6 +19,8 @@
 
 from .clump_info_items import \
      clump_info_registry
+from .clump_validators import \
+     clump_validator_registry
 
 from .contour_finder import \
      identify_contours
@@ -45,7 +47,7 @@
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
-                 function=None, clump_info=None):
+                 clump_info=None, validators=None):
         self.parent = parent
         self.data = data
         self.quantities = data.quantities
@@ -62,16 +64,23 @@
             # Clump info will act the same if add_info_item is called before or after clump finding.
             self.clump_info = copy.deepcopy(clump_info)
 
-        # Function determining whether a clump is valid and should be kept.
-        self.default_function = 'self.data.quantities["IsBound"](truncate=True,include_thermal_energy=True) > 1.0'
-        if function is None:
-            self.function = self.default_function
-        else:
-            self.function = function
+        if validators is None:
+            validators = []
+        self.validators = validators
+        # Return value of validity function.
+        self.valid = None
 
-        # Return value of validity function, saved so it does not have to be calculated again.
-        self.function_value = None
-
+    def add_validator(self, validator, *args, **kwargs):
+        """
+        Add a validating function to determine whether the clump should 
+        be kept.
+        """
+        callback = clump_validator_registry.find(validator, *args, **kwargs)
+        self.validators.append(callback)
+        if self.children is None: return
+        for child in self.children:
+            child.add_validator(validator, *args, **kwargs)
+        
     def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
@@ -136,7 +145,7 @@
                 # will speed things up.
                 continue
             self.children.append(Clump(new_clump, self, self.field,
-                                       self.cached_fields,function=self.function,
+                                       self.cached_fields,validators=self.validators,
                                        clump_info=self.clump_info))
 
     def pass_down(self,operation):
@@ -152,24 +161,30 @@
         for child in self.children:
             child.pass_down(operation)
 
-    def _isValid(self):
-        "Perform user specified function to determine if child clumps should be kept."
+    def _validate(self):
+        "Apply all user specified validator functions."
 
-        # Only call function if it has not been already.
-        if self.function_value is None:
-            self.function_value = eval(self.function)
+        # Only call functions if not done already.
+        if self.valid is not None:
+            return self.valid
 
-        return self.function_value
+        self.valid = True
+        for validator in self.validators:
+            self.valid &= validator(self)
+            if not self.valid:
+                break
+
+        return self.valid
 
     def __reduce__(self):
         return (_reconstruct_clump, 
                 (self.parent, self.field, self.min_val, self.max_val,
-                 self.function_value, self.children, self.data, self.clump_info, self.function))
+                 self.valid, self.children, self.data, self.clump_info, self.function))
 
     def __getitem__(self,request):
         return self.data[request]
 
-def _reconstruct_clump(parent, field, mi, ma, function_value, children, data, clump_info, 
+def _reconstruct_clump(parent, field, mi, ma, valid, children, data, clump_info, 
         function=None):
     obj = object.__new__(Clump)
     if iterable(parent):
@@ -178,8 +193,8 @@
         except KeyError:
             parent = parent
     if children is None: children = []
-    obj.parent, obj.field, obj.min_val, obj.max_val, obj.function_value, obj.children, obj.clump_info, obj.function = \
-        parent, field, mi, ma, function_value, children, clump_info, function
+    obj.parent, obj.field, obj.min_val, obj.max_val, obj.valid, obj.children, obj.clump_info, obj.function = \
+        parent, field, mi, ma, valid, children, clump_info, function
     # Now we override, because the parent/child relationship seems a bit
     # unreliable in the unpickling
     for child in children: child.parent = obj
@@ -203,7 +218,7 @@
             find_clumps(child, min_val*d_clump, max_val, d_clump)
             if ((child.children is not None) and (len(child.children) > 0)):
                 these_children.append(child)
-            elif (child._isValid()):
+            elif (child._validate()):
                 these_children.append(child)
             else:
                 print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])

diff -r a64caf0d74e7a89104ca0c9d1a83530122d40aaf -r c38c4035fc5f9d2467f196b4bcd2c0d5d654f142 yt/analysis_modules/level_sets/clump_validators.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -0,0 +1,51 @@
+"""
+ClumpValidators and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_validator_registry = OperatorRegistry()
+
+def add_validator(name, function):
+    clump_validator_registry[name] = ClumpValidator(function)
+
+class ClumpValidator(object):
+    r"""
+    A ClumpValidator is a function that takes a clump and returns 
+    True or False as to whether the clump is valid and shall be kept.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _gravitationally_bound(clump, truncate=True,
+                           include_thermal_energy=True):
+    "True if clump is gravitationally bound."
+    return (clump.quantities.is_bound(truncate=truncate,
+        include_thermal_energy=include_thermal_energy) > 1.0)
+add_validator("gravitationally_bound", _gravitationally_bound)
+
+def _min_cells(clump, n_cells):
+    "True if clump has a minimum number of cells."
+    return (clump["index", "ones"].size >= n_cells)
+add_validator("min_cells", _min_cells)


https://bitbucket.org/yt_analysis/yt/commits/096d4ad61fe5/
Changeset:   096d4ad61fe5
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-21 22:14:34
Summary:     Merging.
Affected #:  4 files

diff -r 4dba8041c4d9bb269912e04d648ff0232d4cf22e -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -22,11 +22,12 @@
 c_min = 10**np.floor(np.log10(data_source[field]).min()  )
 c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
-# keep only clumps with at least 20 cells
-function = 'self.data[\'%s\'].size > 20' % field
+# Now we get our 'base' clump -- this one just covers the whole domain.
+master_clump = Clump(data_source, None, field)
 
-# Now find get our 'base' clump -- this one just covers the whole domain.
-master_clump = Clump(data_source, None, field, function=function)
+# Add a "validator" to weed out clumps with less than 20 cells.
+# As many validators can be added as you want.
+master_clump.add_validator("min_cells", 20)
 
 # This next command accepts our base clump and we say the range between which
 # we want to contour.  It recursively finds clumps within the master clump, at

diff -r 4dba8041c4d9bb269912e04d648ff0232d4cf22e -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -25,6 +25,9 @@
 
 from .clump_info_items import \
     add_clump_info
+
+from .clump_validators import \
+    add_validator
     
 from .clump_tools import \
     recursive_all_clumps, \

diff -r 4dba8041c4d9bb269912e04d648ff0232d4cf22e -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -19,6 +19,8 @@
 
 from .clump_info_items import \
      clump_info_registry
+from .clump_validators import \
+     clump_validator_registry
 
 from .contour_finder import \
      identify_contours
@@ -45,7 +47,7 @@
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
-                 function=None, clump_info=None):
+                 clump_info=None, validators=None):
         self.parent = parent
         self.data = data
         self.quantities = data.quantities
@@ -62,16 +64,23 @@
             # Clump info will act the same if add_info_item is called before or after clump finding.
             self.clump_info = copy.deepcopy(clump_info)
 
-        # Function determining whether a clump is valid and should be kept.
-        self.default_function = 'self.data.quantities["IsBound"](truncate=True,include_thermal_energy=True) > 1.0'
-        if function is None:
-            self.function = self.default_function
-        else:
-            self.function = function
+        if validators is None:
+            validators = []
+        self.validators = validators
+        # Return value of validity function.
+        self.valid = None
 
-        # Return value of validity function, saved so it does not have to be calculated again.
-        self.function_value = None
-
+    def add_validator(self, validator, *args, **kwargs):
+        """
+        Add a validating function to determine whether the clump should 
+        be kept.
+        """
+        callback = clump_validator_registry.find(validator, *args, **kwargs)
+        self.validators.append(callback)
+        if self.children is None: return
+        for child in self.children:
+            child.add_validator(validator, *args, **kwargs)
+        
     def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
@@ -136,7 +145,7 @@
                 # will speed things up.
                 continue
             self.children.append(Clump(new_clump, self, self.field,
-                                       self.cached_fields,function=self.function,
+                                       self.cached_fields,validators=self.validators,
                                        clump_info=self.clump_info))
 
     def pass_down(self,operation):
@@ -152,24 +161,30 @@
         for child in self.children:
             child.pass_down(operation)
 
-    def _isValid(self):
-        "Perform user specified function to determine if child clumps should be kept."
+    def _validate(self):
+        "Apply all user specified validator functions."
 
-        # Only call function if it has not been already.
-        if self.function_value is None:
-            self.function_value = eval(self.function)
+        # Only call functions if not done already.
+        if self.valid is not None:
+            return self.valid
 
-        return self.function_value
+        self.valid = True
+        for validator in self.validators:
+            self.valid &= validator(self)
+            if not self.valid:
+                break
+
+        return self.valid
 
     def __reduce__(self):
         return (_reconstruct_clump, 
                 (self.parent, self.field, self.min_val, self.max_val,
-                 self.function_value, self.children, self.data, self.clump_info, self.function))
+                 self.valid, self.children, self.data, self.clump_info, self.function))
 
     def __getitem__(self,request):
         return self.data[request]
 
-def _reconstruct_clump(parent, field, mi, ma, function_value, children, data, clump_info, 
+def _reconstruct_clump(parent, field, mi, ma, valid, children, data, clump_info, 
         function=None):
     obj = object.__new__(Clump)
     if iterable(parent):
@@ -178,8 +193,8 @@
         except KeyError:
             parent = parent
     if children is None: children = []
-    obj.parent, obj.field, obj.min_val, obj.max_val, obj.function_value, obj.children, obj.clump_info, obj.function = \
-        parent, field, mi, ma, function_value, children, clump_info, function
+    obj.parent, obj.field, obj.min_val, obj.max_val, obj.valid, obj.children, obj.clump_info, obj.function = \
+        parent, field, mi, ma, valid, children, clump_info, function
     # Now we override, because the parent/child relationship seems a bit
     # unreliable in the unpickling
     for child in children: child.parent = obj
@@ -203,7 +218,7 @@
             find_clumps(child, min_val*d_clump, max_val, d_clump)
             if ((child.children is not None) and (len(child.children) > 0)):
                 these_children.append(child)
-            elif (child._isValid()):
+            elif (child._validate()):
                 these_children.append(child)
             else:
                 print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])

diff -r 4dba8041c4d9bb269912e04d648ff0232d4cf22e -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 yt/analysis_modules/level_sets/clump_validators.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -0,0 +1,51 @@
+"""
+ClumpValidators and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_validator_registry = OperatorRegistry()
+
+def add_validator(name, function):
+    clump_validator_registry[name] = ClumpValidator(function)
+
+class ClumpValidator(object):
+    r"""
+    A ClumpValidator is a function that takes a clump and returns 
+    True or False as to whether the clump is valid and shall be kept.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _gravitationally_bound(clump, truncate=True,
+                           include_thermal_energy=True):
+    "True if clump is gravitationally bound."
+    return (clump.quantities.is_bound(truncate=truncate,
+        include_thermal_energy=include_thermal_energy) > 1.0)
+add_validator("gravitationally_bound", _gravitationally_bound)
+
+def _min_cells(clump, n_cells):
+    "True if clump has a minimum number of cells."
+    return (clump["index", "ones"].size >= n_cells)
+add_validator("min_cells", _min_cells)


https://bitbucket.org/yt_analysis/yt/commits/eab19453cc33/
Changeset:   eab19453cc33
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-22 03:24:42
Summary:     Refactor contour finding to consolidate logic.
Affected #:  1 file

diff -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 -r eab19453cc33aaa4f78f66ef1ab1d81930abe2e2 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -385,6 +385,7 @@
         if spos[i] <= vc.left_edge[i] or spos[i] >= vc.right_edge[i]: return 0
     return 1
 
+@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 cdef void construct_boundary_relationships(Node trunk, ContourTree tree, 
@@ -393,161 +394,64 @@
                 np.ndarray[np.int64_t, ndim=1] node_ids):
     # We only look at the boundary and find the nodes next to it.
     # Contours is a dict, keyed by the node.id.
-    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
+    cdef int i, j, off_i, off_j, oi, oj, level, ax, ax0, ax1, n1, n2
     cdef np.int64_t c1, c2
     cdef Node adj_node
     cdef VolumeContainer *vc1, *vc0 = vcs[nid]
-    nx = vc0.dims[0]
-    ny = vc0.dims[1]
-    nz = vc0.dims[2]
-    cdef int s = (ny*nx + nx*nz + ny*nz) * 18
+    cdef int s = (vc0.dims[1]*vc0.dims[0]
+                + vc0.dims[0]*vc0.dims[2]
+                + vc0.dims[1]*vc0.dims[2]) * 18
     # We allocate an array of fixed (maximum) size
     cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
-    cdef int ti = 0
-    cdef int index
+    cdef int ti = 0, side
+    cdef int index, pos[3], my_pos[3]
     cdef np.float64_t spos[3]
 
-    # First the x-pass
-    for i in range(ny):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == ny - 1 and oi == 1: continue
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    # Adjust by -1 in x, then oi and oj in y and z
-                    get_spos(vc0, -1, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, 0, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        # If the other node has been examined, then we assume
-                        # it has already resolved the conflict.
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
+    for ax in range(3):
+        ax0 = (ax + 1) % 3
+        ax1 = (ax + 2) % 3
+        n1 = vc0.dims[ax0]
+        n2 = vc0.dims[ax1]
+        for i in range(n1):
+            for j in range(n2):
+                for off_i in range(3):
+                    oi = off_i - 1
+                    if i == 0 and oi == -1: continue
+                    if i == n1 - 1 and oi == 1: continue
+                    for off_j in range(3):
+                        oj = off_j - 1
+                        if j == 0 and oj == -1: continue
+                        if j == n2 - 1 and oj == 1: continue
+                        pos[ax0] = i + oi
+                        pos[ax1] = j + oj
+                        my_pos[ax0] = i
+                        my_pos[ax1] = j
+                        for side in range(2):
+                            # We go off each end of the block.
+                            if side == 0:
+                                pos[ax] = -1
+                                my_pos[ax] = 0
                             else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-                            ti += 1
-                    # This is outside our vc
-                    get_spos(vc0, nx, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, nx - 1, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
-                            else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-    # Now y-pass
-    for i in range(nx):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == nz - 1 and oj == 1: continue
-                    get_spos(vc0, i + oi, -1, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, 0, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
-                            else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-                            ti += 1
+                                pos[ax] = vc0.dims[ax]
+                                my_pos[ax] = vc0.dims[ax]-1
+                            get_spos(vc0, pos[0], pos[1], pos[2], ax, spos)
+                            adj_node = _find_node(trunk, spos)
+                            vc1 = vcs[adj_node.node_ind]
+                            if spos_contained(vc1, spos):
+                                index = vc_index(vc0, my_pos[0], 
+                                                 my_pos[1], my_pos[2])
+                                c1 = (<np.int64_t*>vc0.data[0])[index]
+                                index = vc_pos_index(vc1, spos)
+                                c2 = (<np.int64_t*>vc1.data[0])[index]
+                                if c1 > -1 and c2 > -1:
+                                    if examined[adj_node.node_ind] == 0:
+                                        joins[ti,0] = i64max(c1,c2)
+                                        joins[ti,1] = i64min(c1,c2)
+                                    else:
+                                        joins[ti,0] = c1
+                                        joins[ti,1] = c2
+                                    ti += 1
 
-                    get_spos(vc0, i + oi, ny, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, ny - 1, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
-                            else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-                            ti += 1
-
-    # Now z-pass
-    for i in range(nx):
-        for j in range(ny):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    if j == 0 and oj == -1: continue
-                    if j == ny - 1 and oj == 1: continue
-                    get_spos(vc0, i + oi,  j + oj, -1, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, 0)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
-                            else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-                            ti += 1
-
-                    get_spos(vc0, i + oi, j + oj, nz, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, nz - 1)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            if examined[adj_node.node_ind] == 0:
-                                joins[ti,0] = i64max(c1,c2)
-                                joins[ti,1] = i64min(c1,c2)
-                            else:
-                                joins[ti,0] = c1
-                                joins[ti,1] = c2
-                            ti += 1
     if ti == 0: return
     new_joins = tree.cull_joins(joins[:ti,:])
     tree.add_joins(new_joins)
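
A minimal sketch of the consolidation above, in plain Python with a stand-in
dims tuple: a single loop over ax replaces the separate x-, y-, and z-passes,
with ax0/ax1 as the two transverse axes and side selecting the low or high
face of the block:

    dims = (4, 4, 4)  # stand-in for vc0.dims
    for ax in range(3):
        ax0, ax1 = (ax + 1) % 3, (ax + 2) % 3  # the two transverse axes
        for side in (0, 1):
            # One cell outside the block, and the matching boundary layer
            # just inside it.
            pos_ax = -1 if side == 0 else dims[ax]
            my_pos_ax = 0 if side == 0 else dims[ax] - 1
            print(ax, ax0, ax1, side, pos_ax, my_pos_ax)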


https://bitbucket.org/yt_analysis/yt/commits/3859d333d3aa/
Changeset:   3859d333d3aa
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-22 00:14:01
Summary:     Adding gravitational boundedness clump validator.
Affected #:  2 files

diff -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 -r 3859d333d3aab6e30cc633b7df662c6c586ec4ba yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -15,8 +15,11 @@
 
 import numpy as np
 
+from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.operator_registry import \
-     OperatorRegistry
+    OperatorRegistry
+from yt.utilities.physical_constants import \
+    gravitational_constant_cgs as G
 
 clump_validator_registry = OperatorRegistry()
 
@@ -38,11 +41,52 @@
     def __call__(self, clump):
         return self.function(clump, *self.args, **self.kwargs)
     
-def _gravitationally_bound(clump, truncate=True,
-                           include_thermal_energy=True):
+def _gravitationally_bound(clump, use_thermal_energy=True,
+                           use_particles=True, truncate=True):
     "True if clump is gravitationally bound."
-    return (clump.quantities.is_bound(truncate=truncate,
-        include_thermal_energy=include_thermal_energy) > 1.0)
+
+    use_particles &= \
+      ("all", "particle_mass") in clump.data.ds.field_info
+    
+    bulk_velocity = clump.quantities.bulk_velocity(use_particles=use_particles)
+
+    kinetic = 0.5 * (clump["gas", "cell_mass"] *
+        ((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
+         (bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
+         (bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
+
+    if use_thermal_energy:
+        kinetic += (clump["gas", "cell_mass"] *
+                    clump["gas", "thermal_energy"]).sum()
+
+    if use_particles:
+        kinetic += 0.5 * (clump["all", "particle_mass"] *
+            ((bulk_velocity[0] - clump["all", "particle_velocity_x"])**2 +
+             (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
+             (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
+
+    potential = clump.data.ds.quan(G *
+        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
+                          clump["index", "x"].in_cgs(),
+                          clump["index", "y"].in_cgs(),
+                          clump["index", "z"].in_cgs(),
+                          truncate, (kinetic / G).in_cgs()),
+        kinetic.in_cgs().units)
+    
+    if truncate and potential >= kinetic:
+        return True
+
+    if use_particles:
+        potential += clump.data.ds.quan(G *
+            FindBindingEnergy(
+                clump["all", "particle_mass"].in_cgs(),
+                clump["all", "particle_position_x"].in_cgs(),
+                clump["all", "particle_position_y"].in_cgs(),
+                clump["all", "particle_position_z"].in_cgs(),
+                truncate, ((kinetic - potential) / G).in_cgs()),
+        kinetic.in_cgs().units)
+
+    return potential >= kinetic
 add_validator("gravitationally_bound", _gravitationally_bound)
 
 def _min_cells(clump, n_cells):

diff -r 096d4ad61fe5ceb7f7873e3649a74c9a44d2fbe7 -r 3859d333d3aab6e30cc633b7df662c6c586ec4ba yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,14 +21,12 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
 from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs, \
-    mass_sun_cgs, \
     HUGE
 from yt.utilities.math_utils import prec_accum
 
@@ -261,7 +259,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class BulkVelocity(DerivedQuantity):
     r"""
@@ -299,7 +297,8 @@
     def process_chunk(self, data, use_gas = True, use_particles = False):
         vals = []
         if use_gas:
-            vals += [(data["velocity_%s" % ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["velocity_%s" % ax] * 
+                      data["cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
             vals.append(data["cell_mass"].sum(dtype=np.float64))
         if use_particles:
@@ -323,7 +322,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class WeightedVariance(DerivedQuantity):
     r"""


https://bitbucket.org/yt_analysis/yt/commits/924fe5a9a791/
Changeset:   924fe5a9a791
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-22 00:17:17
Summary:     Making field queries consistent in derived quantities.
Affected #:  1 file

diff -r 3859d333d3aab6e30cc633b7df662c6c586ec4ba -r 924fe5a9a791f374983b0baa7d2a5ef83d2f6953 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -235,14 +235,14 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_position_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_position_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -297,15 +297,15 @@
     def process_chunk(self, data, use_gas = True, use_particles = False):
         vals = []
         if use_gas:
-            vals += [(data["velocity_%s" % ax] * 
-                      data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", "velocity_%s" % ax] * 
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_velocity_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_velocity_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
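
A hypothetical illustration of the convention above, assuming ds is an
already-loaded dataset (as in the find_clumps recipe): indexing with an
explicit (field type, field name) tuple removes any ambiguity between gas
and particle fields.

    import numpy as np
    ad = ds.all_data()
    total_gas_mass = ad["gas", "cell_mass"].sum(dtype=np.float64)
    total_particle_mass = ad["all", "particle_mass"].sum(dtype=np.float64)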


https://bitbucket.org/yt_analysis/yt/commits/66cc55c5d06e/
Changeset:   66cc55c5d06e
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-22 03:42:17
Summary:     Merging
Affected #:  2 files

diff -r eab19453cc33aaa4f78f66ef1ab1d81930abe2e2 -r 66cc55c5d06e798895820d4e2029613be3cd3532 yt/analysis_modules/level_sets/clump_validators.py
--- a/yt/analysis_modules/level_sets/clump_validators.py
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -15,8 +15,11 @@
 
 import numpy as np
 
+from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.operator_registry import \
-     OperatorRegistry
+    OperatorRegistry
+from yt.utilities.physical_constants import \
+    gravitational_constant_cgs as G
 
 clump_validator_registry = OperatorRegistry()
 
@@ -38,11 +41,52 @@
     def __call__(self, clump):
         return self.function(clump, *self.args, **self.kwargs)
     
-def _gravitationally_bound(clump, truncate=True,
-                           include_thermal_energy=True):
+def _gravitationally_bound(clump, use_thermal_energy=True,
+                           use_particles=True, truncate=True):
     "True if clump is gravitationally bound."
-    return (clump.quantities.is_bound(truncate=truncate,
-        include_thermal_energy=include_thermal_energy) > 1.0)
+
+    use_particles &= \
+      ("all", "particle_mass") in clump.data.ds.field_info
+    
+    bulk_velocity = clump.quantities.bulk_velocity(use_particles=use_particles)
+
+    kinetic = 0.5 * (clump["gas", "cell_mass"] *
+        ((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
+         (bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
+         (bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
+
+    if use_thermal_energy:
+        kinetic += (clump["gas", "cell_mass"] *
+                    clump["gas", "thermal_energy"]).sum()
+
+    if use_particles:
+        kinetic += 0.5 * (clump["all", "particle_mass"] *
+            ((bulk_velocity[0] - clump["all", "particle_velocity_x"])**2 +
+             (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
+             (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
+
+    potential = clump.data.ds.quan(G *
+        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
+                          clump["index", "x"].in_cgs(),
+                          clump["index", "y"].in_cgs(),
+                          clump["index", "z"].in_cgs(),
+                          truncate, (kinetic / G).in_cgs()),
+        kinetic.in_cgs().units)
+    
+    if truncate and potential >= kinetic:
+        return True
+
+    if use_particles:
+        potential += clump.data.ds.quan(G *
+            FindBindingEnergy(
+                clump["all", "particle_mass"].in_cgs(),
+                clump["all", "particle_position_x"].in_cgs(),
+                clump["all", "particle_position_y"].in_cgs(),
+                clump["all", "particle_position_z"].in_cgs(),
+                truncate, ((kinetic - potential) / G).in_cgs()),
+        kinetic.in_cgs().units)
+
+    return potential >= kinetic
 add_validator("gravitationally_bound", _gravitationally_bound)
 
 def _min_cells(clump, n_cells):

diff -r eab19453cc33aaa4f78f66ef1ab1d81930abe2e2 -r 66cc55c5d06e798895820d4e2029613be3cd3532 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,14 +21,12 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
 from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs, \
-    mass_sun_cgs, \
     HUGE
 from yt.utilities.math_utils import prec_accum
 
@@ -237,14 +235,14 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_position_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_position_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -261,7 +259,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class BulkVelocity(DerivedQuantity):
     r"""
@@ -299,14 +297,15 @@
     def process_chunk(self, data, use_gas = True, use_particles = False):
         vals = []
         if use_gas:
-            vals += [(data["velocity_%s" % ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", "velocity_%s" % ax] * 
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_velocity_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_velocity_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -323,7 +322,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class WeightedVariance(DerivedQuantity):
     r"""


https://bitbucket.org/yt_analysis/yt/commits/2c42395fd351/
Changeset:   2c42395fd351
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-22 16:48:17
Summary:     Disabling a test of functionality that is not yet implemented.
Affected #:  1 file

diff -r 66cc55c5d06e798895820d4e2029613be3cd3532 -r 2c42395fd3517d2a022a6df96192a0d652e19fd7 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -22,10 +22,12 @@
         yield assert_equal, np.all(r["velocity_x"] > 0.25), True
         yield assert_equal, np.sort(dd["density"][t]), np.sort(r["density"])
         yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
-        t2 = (r["temperature"] < 0.75)
-        yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
-        yield assert_equal, np.all(r2["temperature"] < 0.75), True
+        # We are disabling these, as cutting cut regions does not presently
+        # work
+        #r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
+        #t2 = (r["temperature"] < 0.75)
+        #yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
+        #yield assert_equal, np.all(r2["temperature"] < 0.75), True
 
         # Now we can test some projections
         dd = ds.all_data()


https://bitbucket.org/yt_analysis/yt/commits/d3140187a891/
Changeset:   d3140187a891
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-22 23:19:30
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #1030)

Be much more careful about assigning clump IDs.
Affected #:  19 files

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,11 +1,7 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import numpy as np
 
 import yt
-from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
-                                                get_lowest_clumps)
+from yt.analysis_modules.level_sets.api import *
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"  # dataset to load
 # this is the field we look for contours over -- we could do
@@ -13,27 +9,25 @@
 # and 'Dark_Matter_Density'.
 field = "density"
 
-step = 2.0  # This is the multiplicative interval between contours.
+step = 2.0 # This is the multiplicative interval between contours.
 
-ds = yt.load(fn)  # load data
+ds = yt.load(fn) # load data
 
-# We want to find clumps over the entire dataset, so we'll just grab the whole
-# thing!  This is a convenience parameter that prepares an object that covers
-# the whole domain.  Note, though, that it will load on demand and not before!
-data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                      (8., 'kpc'), (1., 'kpc'))
+data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
+                      (8, 'kpc'), (1, 'kpc'))
 
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**np.floor(np.log10(data_source[field]).min())
-c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
-
-# keep only clumps with at least 20 cells
-function = 'self.data[\'%s\'].size > 20' % field
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # Now we get our 'base' clump -- this one just covers the whole domain.
-master_clump = Clump(data_source, None, field, function=function)
+master_clump = Clump(data_source, None, field)
+
+# Add a "validator" to weed out clumps with less than 20 cells.
+# As many validators can be added as you want.
+master_clump.add_validator("min_cells", 20)
 
 # This next command accepts our base clump and we say the range between which
 # we want to contour.  It recursively finds clumps within the master clump, at
@@ -44,32 +38,21 @@
 
 # As it goes, it appends the information about all the sub-clumps to the
 # master-clump.  Among different ways we can examine it, there's a convenience
-# function for outputting the full index to a file.
-f = open('%s_clump_index.txt' % ds, 'w')
-yt.amods.level_sets.write_clump_index(master_clump, 0, f)
-f.close()
+# function for outputting the full hierarchy to a file.
+write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
 # We can also output some handy information, as well.
-f = open('%s_clumps.txt' % ds, 'w')
-yt.amods.level_sets.write_clumps(master_clump, 0, f)
-f.close()
+write_clumps(master_clump, 0, "%s_clumps.txt" % ds)
 
-# We can traverse the clump index to get a list of all of the 'leaf' clumps
+# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
 
 # If you'd like to visualize these clumps, a list of clumps can be supplied to
 # the "clumps" callback on a plot.  First, we create a projection plot:
-prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20, 'kpc'))
+prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20,'kpc'))
 
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
 # Lastly, we write the plot to disk.
 prj.save('clumps')
-
-# We can also save the clump object to disk to read in later so we don't have
-# to spend a lot of time regenerating the clump objects.
-ds.save_object(master_clump, 'My_clumps')
-
-# Later, we can read in the clump object like so,
-master_clump = ds.load_object('My_clumps')

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -27,14 +27,15 @@
      ensure_list, is_root
 from yt.utilities.exceptions import YTUnitConversionError
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
      PhasePlot
-     
-from .operator_registry import \
-    callback_registry
 
+callback_registry = OperatorRegistry()
+    
 def add_callback(name, function):
     callback_registry[name] =  HaloCallback(function)
 

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -27,10 +27,13 @@
      
 from .halo_object import \
      Halo
-from .operator_registry import \
-     callback_registry, \
-     filter_registry, \
-     finding_method_registry, \
+from .halo_callbacks import \
+     callback_registry
+from .halo_filters import \
+     filter_registry
+from .halo_finding_methods import \
+     finding_method_registry
+from .halo_quantities import \
      quantity_registry
 
 class HaloCatalog(ParallelAnalysisInterface):

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -15,10 +15,13 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.spatial import KDTree
 
 from .halo_callbacks import HaloCallback
-from .operator_registry import filter_registry
+
+filter_registry = OperatorRegistry()
 
 def add_filter(name, function):
     filter_registry[name] = HaloFilter(function)

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,10 +21,10 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 
-from .operator_registry import \
-    finding_method_registry
-
+finding_method_registry = OperatorRegistry()
 
 def add_finding_method(name, function):
     finding_method_registry[name] = HaloFindingMethod(function)

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/halo_quantities.py
--- a/yt/analysis_modules/halo_analysis/halo_quantities.py
+++ b/yt/analysis_modules/halo_analysis/halo_quantities.py
@@ -15,8 +15,12 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
 from .halo_callbacks import HaloCallback
-from .operator_registry import quantity_registry
+
+quantity_registry = OperatorRegistry()
 
 def add_quantity(name, function):
     quantity_registry[name] = HaloQuantity(function)

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Operation registry class
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import types
-
-class OperatorRegistry(dict):
-    def find(self, op, *args, **kwargs):
-        if isinstance(op, types.StringTypes):
-            # Lookup, assuming string or hashable object
-            op = copy.deepcopy(self[op])
-            op.args = args
-            op.kwargs = kwargs
-        return op
-
-callback_registry = OperatorRegistry()
-filter_registry = OperatorRegistry()
-finding_method_registry = OperatorRegistry()
-quantity_registry = OperatorRegistry()

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -21,12 +21,14 @@
     find_clumps, \
     get_lowest_clumps, \
     write_clump_index, \
-    write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
-    _DistanceToMainClump
+    write_clumps
 
+from .clump_info_items import \
+    add_clump_info
+
+from .clump_validators import \
+    add_validator
+    
 from .clump_tools import \
     recursive_all_clumps, \
     return_all_clumps, \

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,17 +13,41 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import copy
 import numpy as np
-import copy
+import uuid
 
-from yt.funcs import *
+from .clump_info_items import \
+     clump_info_registry
+from .clump_validators import \
+     clump_validator_registry
 
-from .contour_finder import identify_contours
+from .contour_finder import \
+     identify_contours
+
+from yt.fields.derived_field import \
+    ValidateSpatial
+
+def add_contour_field(ds, contour_key):
+    def _contours(field, data):
+        fd = data.get_field_parameter("contour_slices_%s" % contour_key)
+        vals = data["index", "ones"] * -1
+        if fd is None or fd == 0.0:
+            return vals
+        for sl, v in fd.get(data.id, []):
+            vals[sl] = v
+        return vals
+
+    ds.add_field(("index", "contours_%s" % contour_key),
+                 function=_contours,
+                 validators=[ValidateSpatial(0)],
+                 take_log=False,
+                 display_field=False)
 
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
-                 function=None, clump_info=None):
+                 clump_info=None, validators=None):
         self.parent = parent
         self.data = data
         self.quantities = data.quantities
@@ -40,23 +64,31 @@
             # Clump info will act the same if add_info_item is called before or after clump finding.
             self.clump_info = copy.deepcopy(clump_info)
 
-        # Function determining whether a clump is valid and should be kept.
-        self.default_function = 'self.data.quantities["IsBound"](truncate=True,include_thermal_energy=True) > 1.0'
-        if function is None:
-            self.function = self.default_function
-        else:
-            self.function = function
+        if validators is None:
+            validators = []
+        self.validators = validators
+        # Return value of validity function.
+        self.valid = None
 
-        # Return value of validity function, saved so it does not have to be calculated again.
-        self.function_value = None
-
-    def add_info_item(self,quantity,format):
+    def add_validator(self, validator, *args, **kwargs):
+        """
+        Add a validating function to determine whether the clump should 
+        be kept.
+        """
+        callback = clump_validator_registry.find(validator, *args, **kwargs)
+        self.validators.append(callback)
+        if self.children is None: return
+        for child in self.children:
+            child.add_validator(validator)
+        
+    def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
-        self.clump_info.append({'quantity':quantity, 'format':format})
+        callback = clump_info_registry.find(info_item, *args, **kwargs)
+        self.clump_info.append(callback)
         if self.children is None: return
         for child in self.children:
-            child.add_info_item(quantity,format)
+            child.add_info_item(info_item)
 
     def set_default_clump_info(self):
         "Defines default entries in the clump_info array."
@@ -64,22 +96,13 @@
         # add_info_item is recursive so this function does not need to be.
         self.clump_info = []
 
-        # Number of cells.
-        self.add_info_item('self.data["CellMassMsun"].size','"Cells: %d" % value')
-        # Gas mass in solar masses.
-        self.add_info_item('self.data["CellMassMsun"].sum()','"Mass: %e Msolar" % value')
-        # Volume-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")',
-                           '"Jeans Mass (vol-weighted): %.6e Msolar" % value')
-        # Mass-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")',
-                           '"Jeans Mass (mass-weighted): %.6e Msolar" % value')
-        # Max level.
-        self.add_info_item('self.data["GridLevel"].max()','"Max grid level: %d" % value')
-        # Minimum number density.
-        self.add_info_item('self.data["NumberDensity"].min()','"Min number density: %.6e cm^-3" % value')
-        # Maximum number density.
-        self.add_info_item('self.data["NumberDensity"].max()','"Max number density: %.6e cm^-3" % value')
+        self.add_info_item("total_cells")
+        self.add_info_item("cell_mass")
+        self.add_info_item("mass_weighted_jeans_mass")
+        self.add_info_item("volume_weighted_jeans_mass")
+        self.add_info_item("max_grid_level")
+        self.add_info_item("min_number_density")
+        self.add_info_item("max_number_density")
 
     def clear_clump_info(self):
         "Clears the clump_info array and passes the instruction to its children."
@@ -89,31 +112,40 @@
         for child in self.children:
             child.clear_clump_info()
 
-    def write_info(self,level,f_ptr):
+    def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
         for item in self.clump_info:
-            # Call if callable, otherwise do an eval.
-            if callable(item['quantity']):
-                value = item['quantity']()
-            else:
-                value = eval(item['quantity'])
-            output = eval(item['format'])
-            f_ptr.write("%s%s" % ('\t'*level,output))
-            f_ptr.write("\n")
+            value = item(self)
+            f_ptr.write("%s%s\n" % ('\t'*level, value))
 
     def find_children(self, min_val, max_val = None):
         if self.children is not None:
-            print "Wiping out existing children clumps."
+            print "Wiping out existing children clumps.", len(self.children)
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
-            new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid + 1)],
-                    {'contour_slices': cids})
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        contour_key = uuid.uuid4().hex
+        base_object = getattr(self.data, 'base_object', self.data)
+        add_contour_field(base_object.pf, contour_key)
+        for cid in sorted(unique_contours):
+            if cid == -1: continue
+            new_clump = base_object.cut_region(
+                    ["obj['contours_%s'] == %s" % (contour_key, cid)],
+                    {('contour_slices_%s' % contour_key): cids})
+            if new_clump["ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
-                                       self.cached_fields,function=self.function,
+                                       self.cached_fields,validators=self.validators,
                                        clump_info=self.clump_info))
 
     def pass_down(self,operation):
@@ -129,24 +161,30 @@
         for child in self.children:
             child.pass_down(operation)
 
-    def _isValid(self):
-        "Perform user specified function to determine if child clumps should be kept."
+    def _validate(self):
+        "Apply all user specified validator functions."
 
-        # Only call function if it has not been already.
-        if self.function_value is None:
-            self.function_value = eval(self.function)
+        # Only call functions if not done already.
+        if self.valid is not None:
+            return self.valid
 
-        return self.function_value
+        self.valid = True
+        for validator in self.validators:
+            self.valid &= validator(self)
+            if not self.valid:
+                break
+
+        return self.valid
 
     def __reduce__(self):
         return (_reconstruct_clump, 
                 (self.parent, self.field, self.min_val, self.max_val,
-                 self.function_value, self.children, self.data, self.clump_info, self.function))
+                 self.valid, self.children, self.data, self.clump_info, self.function))
 
     def __getitem__(self,request):
         return self.data[request]
 
-def _reconstruct_clump(parent, field, mi, ma, function_value, children, data, clump_info, 
+def _reconstruct_clump(parent, field, mi, ma, valid, children, data, clump_info, 
         function=None):
     obj = object.__new__(Clump)
     if iterable(parent):
@@ -155,8 +193,8 @@
         except KeyError:
             parent = parent
     if children is None: children = []
-    obj.parent, obj.field, obj.min_val, obj.max_val, obj.function_value, obj.children, obj.clump_info, obj.function = \
-        parent, field, mi, ma, function_value, children, clump_info, function
+    obj.parent, obj.field, obj.min_val, obj.max_val, obj.valid, obj.children, obj.clump_info, obj.function = \
+        parent, field, mi, ma, valid, children, clump_info, function
     # Now we override, because the parent/child relationship seems a bit
     # unreliable in the unpickling
     for child in children: child.parent = obj
@@ -180,10 +218,10 @@
             find_clumps(child, min_val*d_clump, max_val, d_clump)
             if ((child.children is not None) and (len(child.children) > 0)):
                 these_children.append(child)
-            elif (child._isValid()):
+            elif (child._validate()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children
@@ -206,88 +244,35 @@
 
     return clump_list
 
-def write_clump_index(clump,level,f_ptr):
+def write_clump_index(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
+        fh.write("\t")
+    fh.write("Clump at level %d:\n" % level)
+    clump.write_info(level, fh)
+    fh.write("\n")
+    fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
+            write_clump_index(child, (level+1), fh)
+    if top:
+        fh.close()
 
-def write_clumps(clump,level,f_ptr):
+def write_clumps(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        clump.write_info(level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
+        fh.write("%sClump:\n" % ("\t"*level))
+        clump.write_info(level, fh)
+        fh.write("\n")
+        fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-# Old clump info writing routines.
-def write_old_clump_index(clump,level,f_ptr):
-    for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    write_old_clump_info(clump,level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
-
-def write_old_clumps(clump,level,f_ptr):
-    if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        write_old_clump_info(clump,level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-__clump_info_template = \
-"""
-%(tl)sCells: %(num_cells)s
-%(tl)sMass: %(total_mass).6e Msolar
-%(tl)sJeans Mass (vol-weighted): %(jeans_mass_vol).6e Msolar
-%(tl)sJeans Mass (mass-weighted): %(jeans_mass_mass).6e Msolar
-%(tl)sMax grid level: %(max_level)s
-%(tl)sMin number density: %(min_density).6e cm^-3
-%(tl)sMax number density: %(max_density).6e cm^-3
-
-"""
-
-def write_old_clump_info(clump,level,f_ptr):
-    fmt_dict = {'tl':  "\t" * level}
-    fmt_dict['num_cells'] = clump.data["CellMassMsun"].size,
-    fmt_dict['total_mass'] = clump.data["CellMassMsun"].sum()
-    fmt_dict['jeans_mass_vol'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")
-    fmt_dict['jeans_mass_mass'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")
-    fmt_dict['max_level'] =  clump.data["GridLevel"].max()
-    fmt_dict['min_density'] =  clump.data["NumberDensity"].min()
-    fmt_dict['max_density'] =  clump.data["NumberDensity"].max()
-    f_ptr.write(__clump_info_template % fmt_dict)
-
-# Recipes for various clump calculations.
-recipes = {}
-
-# Distance from clump center of mass to center of mass of top level object.
-def _DistanceToMainClump(master,units='pc'):
-    masterCOM = master.data.quantities['CenterOfMass']()
-    pass_command = "self.masterCOM = [%.10f, %.10f, %.10f]" % (masterCOM[0],
-                                                               masterCOM[1],
-                                                               masterCOM[2])
-    master.pass_down(pass_command)
-    master.pass_down("self.com = self.data.quantities['CenterOfMass']()")
-
-    quantity = "((self.com[0]-self.masterCOM[0])**2 + (self.com[1]-self.masterCOM[1])**2 + (self.com[2]-self.masterCOM[2])**2)**(0.5)*self.data.ds.units['%s']" % units
-    format = "%s%s%s" % ("'Distance from center: %.6e ",units,"' % value")
-
-    master.add_info_item(quantity,format)
-
-recipes['DistanceToMainClump'] = _DistanceToMainClump
+            write_clumps(child, 0, fh)
+    if top:
+        fh.close()
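
Taken together, the clump_handling changes replace the eval'd validity string with named validator callbacks, key the contour field and its field parameter by a fresh uuid per find_children call so repeated searches cannot collide, and let write_clump_index and write_clumps accept either an open file or a filename. A sketch of the new-style driver, assuming a sample dataset (the path is illustrative):

    import yt
    from yt.analysis_modules.level_sets.api import \
        Clump, find_clumps, write_clump_index

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                          (8., "kpc"), (1., "kpc"))

    master_clump = Clump(data_source, None, ("gas", "density"))
    # Named validators replace the old function-string keyword.
    master_clump.add_validator("min_cells", 20)

    c_min = data_source["gas", "density"].min()
    c_max = data_source["gas", "density"].max()
    find_clumps(master_clump, c_min, c_max, 2.0)

    # The writers now accept a filename and close it when done.
    write_clump_index(master_clump, 0, "clump_index.txt")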

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/level_sets/clump_info_items.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -0,0 +1,87 @@
+"""
+ClumpInfoCallback and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_info_registry = OperatorRegistry()
+
+def add_clump_info(name, function):
+    clump_info_registry[name] = ClumpInfoCallback(function)
+
+class ClumpInfoCallback(object):
+    r"""
+    A ClumpInfoCallback is a function that takes a clump, computes a 
+    quantity, and returns a string to be printed out for writing clump info.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _total_cells(clump):
+    n_cells = clump.data["index", "ones"].size
+    return "Cells: %d." % n_cells
+add_clump_info("total_cells", _total_cells)
+
+def _cell_mass(clump):
+    cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
+    return "Mass: %e Msun." % cell_mass
+add_clump_info("cell_mass", _cell_mass)
+
+def _mass_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+def _volume_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("index", "cell_volume")).in_units("Msun")
+    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
+
+def _max_grid_level(clump):
+    max_level = clump.data["index", "grid_level"].max()
+    return "Max grid level: %d." % max_level
+add_clump_info("max_grid_level", _max_grid_level)
+
+def _min_number_density(clump):
+    min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
+    return "Min number density: %.6e cm^-3." % min_n
+add_clump_info("min_number_density", _min_number_density)
+
+def _max_number_density(clump):
+    max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
+    return "Max number density: %.6e cm^-3." % max_n
+add_clump_info("max_number_density", _max_number_density)
+
+def _distance_to_main_clump(clump, units="pc"):
+    master = clump
+    while master.parent is not None:
+        master = master.parent
+    master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
+    my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
+    distance = np.sqrt(((master_com - my_com)**2).sum())
+    return "Distance from master center of mass: %.6e %s." % \
+      (distance.in_units(units), units)
+add_clump_info("distance_to_main_clump", _distance_to_main_clump)
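
Info items are now plain functions registered by name; Clump.add_info_item looks them up through clump_info_registry, so user-defined items behave exactly like the built-ins above. A minimal sketch (the item itself is hypothetical):

    from yt.analysis_modules.level_sets.clump_info_items import add_clump_info

    def _mean_density(clump):
        # Info callbacks return the string that write_info prints.
        rho = clump.data["gas", "density"].mean()
        return "Mean density: %.6e g/cm**3." % rho

    add_clump_info("mean_density", _mean_density)
    # master_clump.add_info_item("mean_density")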

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/level_sets/clump_validators.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -0,0 +1,95 @@
+"""
+ClumpValidators and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.data_point_utilities import FindBindingEnergy
+from yt.utilities.operator_registry import \
+    OperatorRegistry
+from yt.utilities.physical_constants import \
+    gravitational_constant_cgs as G
+
+clump_validator_registry = OperatorRegistry()
+
+def add_validator(name, function):
+    clump_validator_registry[name] = ClumpValidator(function)
+
+class ClumpValidator(object):
+    r"""
+    A ClumpValidator is a function that takes a clump and returns 
+    True or False as to whether the clump is valid and shall be kept.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _gravitationally_bound(clump, use_thermal_energy=True,
+                           use_particles=True, truncate=True):
+    "True if clump is gravitationally bound."
+
+    use_particles &= \
+      ("all", "particle_mass") in clump.data.ds.field_info
+    
+    bulk_velocity = clump.quantities.bulk_velocity(use_particles=use_particles)
+
+    kinetic = 0.5 * (clump["gas", "cell_mass"] *
+        ((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
+         (bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
+         (bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
+
+    if use_thermal_energy:
+        kinetic += (clump["gas", "cell_mass"] *
+                    clump["gas", "thermal_energy"]).sum()
+
+    if use_particles:
+        kinetic += 0.5 * (clump["all", "particle_mass"] *
+            ((bulk_velocity[0] - clump["all", "particle_velocity_x"])**2 +
+             (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
+             (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
+
+    potential = clump.data.ds.quan(G *
+        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
+                          clump["index", "x"].in_cgs(),
+                          clump["index", "y"].in_cgs(),
+                          clump["index", "z"].in_cgs(),
+                          truncate, (kinetic / G).in_cgs()),
+        kinetic.in_cgs().units)
+    
+    if truncate and potential >= kinetic:
+        return True
+
+    if use_particles:
+        potential += clump.data.ds.quan(G *
+            FindBindingEnergy(
+                clump["all", "particle_mass"].in_cgs(),
+                clump["all", "particle_position_x"].in_cgs(),
+                clump["all", "particle_position_y"].in_cgs(),
+                clump["all", "particle_position_z"].in_cgs(),
+                truncate, ((kinetic - potential) / G).in_cgs()),
+        kinetic.in_cgs().units)
+
+    return potential >= kinetic
+add_validator("gravitationally_bound", _gravitationally_bound)
+
+def _min_cells(clump, n_cells):
+    "True if clump has a minimum number of cells."
+    return (clump["index", "ones"].size >= n_cells)
+add_validator("min_cells", _min_cells)
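
Validators follow the same registry pattern: a function that returns True if the clump should be kept, with any extra arguments bound when the validator is attached to a clump. A hypothetical gas-mass cut, for illustration:

    from yt.analysis_modules.level_sets.clump_validators import add_validator

    def _minimum_gas_mass(clump, min_mass):
        # Keep the clump only if it holds at least min_mass of gas.
        return clump["gas", "cell_mass"].sum() >= min_mass

    add_validator("minimum_gas_mass", _minimum_gas_mass)
    # master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))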

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -39,9 +39,9 @@
         node_ids.append(nid)
         values = g[field][sl].astype("float64")
         contour_ids = np.zeros(dims, "int64") - 1
-        gct.identify_contours(values, contour_ids, total_contours)
+        total_contours += gct.identify_contours(values, contour_ids,
+                                                total_contours)
         new_contours = tree.cull_candidates(contour_ids)
-        total_contours += new_contours.shape[0]
         tree.add_contours(new_contours)
         # Now we can create a partitioned grid with the contours.
         LE = (DLE + g.dds * gi).in_units("code_length").ndarray_view()
@@ -51,6 +51,8 @@
             LE, RE, dims.astype("int64"))
         contours[nid] = (g.Level, node.node_ind, pg, sl)
     node_ids = np.array(node_ids)
+    if node_ids.size == 0:
+        return 0, {}
     trunk = data_source.tiles.tree.trunk
     mylog.info("Linking node (%s) contours.", len(contours))
     link_node_contours(trunk, contours, tree, node_ids)
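
The counting fix matters because contour IDs must stay globally unique across grids: the Cython identify_contours now reports how many contours it assigned, and that value, rather than the number of culled candidates, advances the running offset; the early return handles data sources that select no grids at all. Schematically, the offset pattern looks like this (a generic sketch, not yt code):

    def label_all(grids, label_one):
        # label_one is a hypothetical per-grid labeler taking an ID offset.
        labels, total = {}, 0
        for g in grids:
            ids, n_new = label_one(g, offset=total)
            labels[g] = ids
            total += n_new  # advance by IDs assigned, not candidates kept
        return labels, total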

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,14 +21,12 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
 from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs, \
-    mass_sun_cgs, \
     HUGE
 from yt.utilities.math_utils import prec_accum
 
@@ -237,14 +235,14 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_position_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_position_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -261,7 +259,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class BulkVelocity(DerivedQuantity):
     r"""
@@ -299,14 +297,15 @@
     def process_chunk(self, data, use_gas = True, use_particles = False):
         vals = []
         if use_gas:
-            vals += [(data["velocity_%s" % ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", "velocity_%s" % ax] * 
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_velocity_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_velocity_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -323,7 +322,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class WeightedVariance(DerivedQuantity):
     r"""
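
Because reduce_intermediate now wraps its result in ds.arr, center_of_mass and bulk_velocity come back as YTArrays rather than bare lists, which is what the clump info and validator callbacks above rely on for unit handling. Typical use, with the dataset assumed loaded:

    sp = ds.sphere("max", (10, "kpc"))
    com = sp.quantities.center_of_mass()
    print com.in_units("kpc")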

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -16,6 +16,7 @@
 
 import types
 import numpy as np
+from contextlib import contextmanager
 
 from yt.funcs import *
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
@@ -718,6 +719,22 @@
             self.field_data[field] = self.base_object[field][ind]
 
     @property
+    def blocks(self):
+        # We have to take a slightly different approach here.  Note that all
+        # that .blocks has to yield is a 3D array and a mask.
+        for obj, m in self.base_object.blocks:
+            m = m.copy()
+            with obj._field_parameter_state(self.field_parameters):
+                for cond in self.conditionals:
+                    ss = eval(cond)
+                    m = np.logical_and(m, ss, m)
+            if not np.any(m): continue
+            yield obj, m
+
+    def cut_region(self, *args, **kwargs):
+        raise NotImplementedError
+
+    @property
     def _cond_ind(self):
         ind = None
         obj = self.base_object
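
The new blocks property is what lets contouring run inside a cut region: it walks the base object's blocks and folds the region's conditionals into each mask, skipping blocks the cut empties. Cutting a cut region, by contrast, now raises NotImplementedError (hence the disabled test below). A short usage sketch, dataset assumed:

    ad = ds.all_data()
    cr = ad.cut_region(["obj['temperature'] > 1e4"])
    for block, mask in cr.blocks:
        # block is a 3D grid chunk; mask flags the cells inside the region.
        print block, mask.sum()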

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -22,10 +22,12 @@
         yield assert_equal, np.all(r["velocity_x"] > 0.25), True
         yield assert_equal, np.sort(dd["density"][t]), np.sort(r["density"])
         yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
-        t2 = (r["temperature"] < 0.75)
-        yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
-        yield assert_equal, np.all(r2["temperature"] < 0.75), True
+        # We are disabling these, as cutting cut regions does not presently
+        # work
+        #r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
+        #t2 = (r["temperature"] < 0.75)
+        #yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
+        #yield assert_equal, np.all(r2["temperature"] < 0.75), True
 
         # Now we can test some projections
         dd = ds.all_data()

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -207,18 +207,3 @@
              units="cm",
              display_field=False)
 
-    def _contours(field, data):
-        fd = data.get_field_parameter("contour_slices")
-        vals = data["index", "ones"] * -1
-        if fd is None or fd == 0.0:
-            return vals
-        for sl, v in fd.get(data.id, []):
-            vals[sl] = v
-        return vals
-    
-    registry.add_field(("index", "contours"),
-                       function=_contours,
-                       validators=[ValidateSpatial(0)],
-                       take_log=False,
-                       display_field=False)
-

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -228,7 +228,7 @@
         cdef int i, n, ins
         cdef np.int64_t cid1, cid2
         # Okay, this requires lots of iteration, unfortunately
-        cdef ContourID *cur, *root
+        cdef ContourID *cur, *c1, *c2
         n = join_tree.shape[0]
         #print "Counting"
         #print "Checking", self.count()
@@ -253,6 +253,7 @@
                 print "  Inspected ", ins
                 raise RuntimeError
             else:
+                c1.count = c2.count = 0
                 contour_union(c1, c2)
 
     def count(self):
@@ -335,6 +336,7 @@
                                 c2 = container[offset]
                                 if c2 == NULL: continue
                                 c2 = contour_find(c2)
+                                cur.count = c2.count = 0
                                 contour_union(cur, c2)
                                 cur = contour_find(cur)
         for i in range(ni):
@@ -342,13 +344,13 @@
                 for k in range(nk):
                     c1 = container[i*nj*nk + j*nk + k]
                     if c1 == NULL: continue
-                    cur = c1
                     c1 = contour_find(c1)
                     contour_ids[i,j,k] = c1.contour_id
         
         for i in range(ni*nj*nk): 
             if container[i] != NULL: free(container[i])
         free(container)
+        return nc
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -383,6 +385,7 @@
         if spos[i] <= vc.left_edge[i] or spos[i] >= vc.right_edge[i]: return 0
     return 1
 
+@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 cdef void construct_boundary_relationships(Node trunk, ContourTree tree, 
@@ -391,227 +394,68 @@
                 np.ndarray[np.int64_t, ndim=1] node_ids):
     # We only look at the boundary and find the nodes next to it.
     # Contours is a dict, keyed by the node.id.
-    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
+    cdef int i, j, off_i, off_j, oi, oj, level, ax, ax0, ax1, n1, n2
     cdef np.int64_t c1, c2
     cdef Node adj_node
     cdef VolumeContainer *vc1, *vc0 = vcs[nid]
-    nx = vc0.dims[0]
-    ny = vc0.dims[1]
-    nz = vc0.dims[2]
-    cdef int s = (ny*nx + nx*nz + ny*nz) * 18
+    cdef int s = (vc0.dims[1]*vc0.dims[0]
+                + vc0.dims[0]*vc0.dims[2]
+                + vc0.dims[1]*vc0.dims[2]) * 18
     # We allocate an array of fixed (maximum) size
     cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
-    cdef int ti = 0
-    cdef int index
+    cdef int ti = 0, side
+    cdef int index, pos[3], my_pos[3]
     cdef np.float64_t spos[3]
 
-    # First the x-pass
-    for i in range(ny):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    # Adjust by -1 in x, then oi and oj in y and z
-                    get_spos(vc0, -1, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, 0, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-                    # This is outside our vc
-                    get_spos(vc0, nx, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, nx - 1, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-    # Now y-pass
-    for i in range(nx):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    get_spos(vc0, i + oi, -1, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, 0, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
+    for ax in range(3):
+        ax0 = (ax + 1) % 3
+        ax1 = (ax + 2) % 3
+        n1 = vc0.dims[ax0]
+        n2 = vc0.dims[ax1]
+        for i in range(n1):
+            for j in range(n2):
+                for off_i in range(3):
+                    oi = off_i - 1
+                    if i == 0 and oi == -1: continue
+                    if i == n1 - 1 and oi == 1: continue
+                    for off_j in range(3):
+                        oj = off_j - 1
+                        if j == 0 and oj == -1: continue
+                        if j == n2 - 1 and oj == 1: continue
+                        pos[ax0] = i + oi
+                        pos[ax1] = j + oj
+                        my_pos[ax0] = i
+                        my_pos[ax1] = j
+                        for side in range(2):
+                            # We go off each end of the block.
+                            if side == 0:
+                                pos[ax] = -1
+                                my_pos[ax] = 0
+                            else:
+                                pos[ax] = vc0.dims[ax]
+                                my_pos[ax] = vc0.dims[ax]-1
+                            get_spos(vc0, pos[0], pos[1], pos[2], ax, spos)
+                            adj_node = _find_node(trunk, spos)
+                            vc1 = vcs[adj_node.node_ind]
+                            if spos_contained(vc1, spos):
+                                index = vc_index(vc0, my_pos[0], 
+                                                 my_pos[1], my_pos[2])
+                                c1 = (<np.int64_t*>vc0.data[0])[index]
+                                index = vc_pos_index(vc1, spos)
+                                c2 = (<np.int64_t*>vc1.data[0])[index]
+                                if c1 > -1 and c2 > -1:
+                                    if examined[adj_node.node_ind] == 0:
+                                        joins[ti,0] = i64max(c1,c2)
+                                        joins[ti,1] = i64min(c1,c2)
+                                    else:
+                                        joins[ti,0] = c1
+                                        joins[ti,1] = c2
+                                    ti += 1
 
-                    get_spos(vc0, i + oi, ny, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, ny - 1, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-
-    # Now z-pass
-    for i in range(nx):
-        for j in range(ny):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    get_spos(vc0, i + oi,  j + oj, -1, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, 0)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-
-                    get_spos(vc0, i + oi, j + oj, nz, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, nz - 1)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
     if ti == 0: return
     new_joins = tree.cull_joins(joins[:ti,:])
     tree.add_joins(new_joins)
 
-cdef inline int are_neighbors(
-            np.float64_t x1, np.float64_t y1, np.float64_t z1,
-            np.float64_t dx1, np.float64_t dy1, np.float64_t dz1,
-            np.float64_t x2, np.float64_t y2, np.float64_t z2,
-            np.float64_t dx2, np.float64_t dy2, np.float64_t dz2,
-        ):
-    # We assume an epsilon of 1e-15
-    if fabs(x1-x2) > 0.5*(dx1+dx2): return 0
-    if fabs(y1-y2) > 0.5*(dy1+dy2): return 0
-    if fabs(z1-z2) > 0.5*(dz1+dz2): return 0
-    return 1
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def identify_field_neighbors(
-            np.ndarray[dtype=np.float64_t, ndim=1] field,
-            np.ndarray[dtype=np.float64_t, ndim=1] x,
-            np.ndarray[dtype=np.float64_t, ndim=1] y,
-            np.ndarray[dtype=np.float64_t, ndim=1] z,
-            np.ndarray[dtype=np.float64_t, ndim=1] dx,
-            np.ndarray[dtype=np.float64_t, ndim=1] dy,
-            np.ndarray[dtype=np.float64_t, ndim=1] dz,
-        ):
-    # We assume this field is pre-jittered; it has no identical values.
-    cdef int outer, inner, N, added
-    cdef np.float64_t x1, y1, z1, dx1, dy1, dz1
-    N = field.shape[0]
-    #cdef np.ndarray[dtype=np.object_t] joins
-    joins = [[] for outer in range(N)]
-    #joins = np.empty(N, dtype='object')
-    for outer in range(N):
-        if (outer % 10000) == 0: print outer, N
-        x1 = x[outer]
-        y1 = y[outer]
-        z1 = z[outer]
-        dx1 = dx[outer]
-        dy1 = dy[outer]
-        dz1 = dz[outer]
-        this_joins = joins[outer]
-        added = 0
-        # Go in reverse order
-        for inner in range(outer, 0, -1):
-            if not are_neighbors(x1, y1, z1, dx1, dy1, dz1,
-                                 x[inner], y[inner], z[inner],
-                                 dx[inner], dy[inner], dz[inner]):
-                continue
-            # Hot dog, we have a weiner!
-            this_joins.append(inner)
-            added += 1
-            if added == 26: break
-    return joins
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def extract_identified_contours(int max_ind, joins):
-    cdef int i
-    contours = []
-    for i in range(max_ind + 1): # +1 to get to the max_ind itself
-        contours.append(set([i]))
-        if len(joins[i]) == 0:
-            continue
-        proto_contour = [i]
-        for j in joins[i]:
-            proto_contour += contours[j]
-        proto_contour = set(proto_contour)
-        for j in proto_contour:
-            contours[j] = proto_contour
-    return contours
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def update_flat_joins(np.ndarray[np.int64_t, ndim=2] joins,
-                 np.ndarray[np.int64_t, ndim=1] contour_ids,
-                 np.ndarray[np.int64_t, ndim=1] final_joins):
-    cdef np.int64_t new, old
-    cdef int i, j, nj, nf, counter
-    cdef int ci, cj, ck
-    nj = joins.shape[0]
-    nf = final_joins.shape[0]
-    for ci in range(contour_ids.shape[0]):
-        if contour_ids[ci] == -1: continue
-        for j in range(nj):
-            if contour_ids[ci] == joins[j,0]:
-                contour_ids[ci] = joins[j,1]
-                break
-        for j in range(nf):
-            if contour_ids[ci] == final_joins[j]:
-                contour_ids[ci] = j + 1
-                break
-
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def update_joins(np.ndarray[np.int64_t, ndim=2] joins,

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/utilities/operator_registry.py
--- /dev/null
+++ b/yt/utilities/operator_registry.py
@@ -0,0 +1,26 @@
+"""
+Operation registry class
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import copy
+import types
+
+class OperatorRegistry(dict):
+    def find(self, op, *args, **kwargs):
+        if isinstance(op, types.StringTypes):
+            # Lookup, assuming string or hashable object
+            op = copy.deepcopy(self[op])
+            op.args = args
+            op.kwargs = kwargs
+        return op
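
The registry itself is just a dict whose find() deep-copies an operator looked up by name and binds the extra arguments onto the copy, leaving the stored prototype untouched; non-string arguments pass through unchanged. A self-contained sketch of those semantics (toy operator class, Python 2 to match the module):

    class Shout(object):
        def __init__(self):
            self.args, self.kwargs = (), {}
        def __call__(self, target):
            return "%s, %s!" % (" ".join(self.args), target)

    registry = OperatorRegistry()
    registry["shout"] = Shout()
    op = registry.find("shout", "hello")  # deep copy with args = ("hello",)
    print op("world")                     # -> hello, world!
    print registry["shout"].args          # -> (); the prototype is unchanged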

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r d3140187a8918755203a2e7150ff2b42b2ccba4f yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -689,20 +689,20 @@
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
-            mylog.debug("Pixelizing contour %s", i)
+            mylog.info("Pixelizing contour %s", i)
 
-            xf_copy = clump[xf].copy()
-            yf_copy = clump[yf].copy()
+            xf_copy = clump[xf].copy().in_units("code_length")
+            yf_copy = clump[yf].copy().in_units("code_length")
 
             temp = _MPL.Pixelize(xf_copy, yf_copy,
-                                 clump[dxf]/2.0,
-                                 clump[dyf]/2.0,
-                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
+                                 clump[dxf].in_units("code_length")/2.0,
+                                 clump[dyf].in_units("code_length")/2.0,
+                                 clump[dxf].d*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
-                                     extent=extent,**self.plot_args)
+                                     extent=extent, **self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):
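
With positions and cell widths converted to code_length before pixelization, annotated clump contours line up with the image buffer whatever the dataset's native unit system. Typical use, continuing the clump-finding sketch above (ds and master_clump assumed from there):

    from yt.analysis_modules.level_sets.api import get_lowest_clumps

    prj = yt.ProjectionPlot(ds, "z", ("gas", "density"))
    prj.annotate_clumps(get_lowest_clumps(master_clump))
    prj.save("clumps.png")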

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.