[yt-svn] commit/yt: 32 new changesets

commits-noreply at bitbucket.org
Tue Jul 29 13:16:40 PDT 2014


32 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/c2a973002c37/
Changeset:   c2a973002c37
Branch:      yt-3.0
User:        chummels
Date:        2014-07-22 23:15:02
Summary:     Editing docstring for covering_grids
Affected #:  1 file

diff -r 3c4dc9e27719f260e29bcbc6ad18c4a3601ed1f9 -r c2a973002c3710c762dac708242b457ee4526114 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -388,12 +388,14 @@
 
 class YTCoveringGridBase(YTSelectionContainer3D):
     """A 3D region with all data extracted to a single, specified
-    resolution.
+    resolution.  The left edge should align with a cell boundary; if it
+    does not, it is snapped to the closest cell boundary.
     
     Parameters
     ----------
     level : int
-        The resolution level data is uniformly gridded at
+        The resolution level to which the data will be gridded.  Level
+        0 is the root grid dx for that dataset.
     left_edge : array_like
         The left edge of the region to be extracted
     dims : array_like
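
For reference, a covering grid is typically constructed along these lines
(a minimal sketch, assuming the sample IsolatedGalaxy dataset; the level
and dimensions are illustrative):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    # Extract a 128^3 uniform-resolution cube at level 2, anchored at the
    # domain left edge, which lies on a cell boundary.
    cg = ds.covering_grid(level=2, left_edge=ds.domain_left_edge,
                          dims=[128, 128, 128])
    print cg["density"].shape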


https://bitbucket.org/yt_analysis/yt/commits/78ee86070249/
Changeset:   78ee86070249
Branch:      yt-3.0
User:        chummels
Date:        2014-07-23 01:41:01
Summary:     Merging.
Affected #:  20 files

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -97,7 +97,7 @@
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-show_authors = True
+show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = 'sphinx'

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -1,11 +1,7 @@
-### THIS RECIPE IS CURRENTLY BROKEN IN YT-3.0
-### DO NOT TRUST THIS RECIPE UNTIL THIS LINE IS REMOVED
-
 import numpy as np
 
 import yt
-from yt.analysis_modules.level_sets.api import (Clump, find_clumps,
-                                                get_lowest_clumps)
+from yt.analysis_modules.level_sets.api import *
 
 fn = "IsolatedGalaxy/galaxy0030/galaxy0030"  # dataset to load
 # this is the field we look for contours over -- we could do
@@ -13,27 +9,25 @@
 # and 'Dark_Matter_Density'.
 field = "density"
 
-step = 2.0  # This is the multiplicative interval between contours.
+step = 2.0 # This is the multiplicative interval between contours.
 
-ds = yt.load(fn)  # load data
+ds = yt.load(fn) # load data
 
-# We want to find clumps over the entire dataset, so we'll just grab the whole
-# thing!  This is a convenience parameter that prepares an object that covers
-# the whole domain.  Note, though, that it will load on demand and not before!
-data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                      (8., 'kpc'), (1., 'kpc'))
+data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
+                      (8, 'kpc'), (1, 'kpc'))
 
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**np.floor(np.log10(data_source[field]).min())
-c_max = 10**np.floor(np.log10(data_source[field]).max() + 1)
-
-# keep only clumps with at least 20 cells
-function = 'self.data[\'%s\'].size > 20' % field
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # Now find get our 'base' clump -- this one just covers the whole domain.
-master_clump = Clump(data_source, None, field, function=function)
+master_clump = Clump(data_source, None, field)
+
+# Add a "validator" to weed out clumps with fewer than 20 cells.
+# You can add as many validators as you want.
+master_clump.add_validator("min_cells", 20)
 
 # This next command accepts our base clump and we say the range between which
 # we want to contour.  It recursively finds clumps within the master clump, at
@@ -44,32 +38,21 @@
 
 # As it goes, it appends the information about all the sub-clumps to the
 # master-clump.  Among different ways we can examine it, there's a convenience
-# function for outputting the full index to a file.
-f = open('%s_clump_index.txt' % ds, 'w')
-yt.amods.level_sets.write_clump_index(master_clump, 0, f)
-f.close()
+# function for outputting the full hierarchy to a file.
+write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
 # We can also output some handy information, as well.
-f = open('%s_clumps.txt' % ds, 'w')
-yt.amods.level_sets.write_clumps(master_clump, 0, f)
-f.close()
+write_clumps(master_clump,0, "%s_clumps.txt" % ds)
 
-# We can traverse the clump index to get a list of all of the 'leaf' clumps
+# We can traverse the clump hierarchy to get a list of all of the 'leaf' clumps
 leaf_clumps = get_lowest_clumps(master_clump)
 
 # If you'd like to visualize these clumps, a list of clumps can be supplied to
 # the "clumps" callback on a plot.  First, we create a projection plot:
-prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20, 'kpc'))
+prj = yt.ProjectionPlot(ds, 2, field, center='c', width=(20,'kpc'))
 
 # Next we annotate the plot with contours on the borders of the clumps
 prj.annotate_clumps(leaf_clumps)
 
 # Lastly, we write the plot to disk.
 prj.save('clumps')
-
-# We can also save the clump object to disk to read in later so we don't have
-# to spend a lot of time regenerating the clump objects.
-ds.save_object(master_clump, 'My_clumps')
-
-# Later, we can read in the clump object like so,
-master_clump = ds.load_object('My_clumps')

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/halo_callbacks.py
--- a/yt/analysis_modules/halo_analysis/halo_callbacks.py
+++ b/yt/analysis_modules/halo_analysis/halo_callbacks.py
@@ -27,14 +27,15 @@
      ensure_list, is_root
 from yt.utilities.exceptions import YTUnitConversionError
 from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
 from yt.visualization.profile_plotter import \
      PhasePlot
-     
-from .operator_registry import \
-    callback_registry
 
+callback_registry = OperatorRegistry()
+    
 def add_callback(name, function):
     callback_registry[name] =  HaloCallback(function)
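
The same OperatorRegistry move repeats below for the filter, finding-method,
and quantity registries.  Registration itself is unchanged; a minimal sketch
of a user-defined callback (the "my_note" callback is hypothetical, not part
of this commit):

    from yt.analysis_modules.halo_analysis.api import add_callback

    def _my_note(halo, note="processed"):
        # Attach an attribute to the halo object for later use.
        halo.note = note
    add_callback("my_note", _my_note)

    # A HaloCatalog instance can then attach it by name:
    # hc.add_callback("my_note", note="seen")
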
 

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -27,10 +27,13 @@
      
 from .halo_object import \
      Halo
-from .operator_registry import \
-     callback_registry, \
-     filter_registry, \
-     finding_method_registry, \
+from .halo_callbacks import \
+     callback_registry
+from .halo_filters import \
+     filter_registry
+from .halo_finding_methods import \
+     finding_method_registry
+from .halo_quantities import \
      quantity_registry
 
 class HaloCatalog(ParallelAnalysisInterface):

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -15,10 +15,13 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 from yt.utilities.spatial import KDTree
 
 from .halo_callbacks import HaloCallback
-from .operator_registry import filter_registry
+
+filter_registry = OperatorRegistry()
 
 def add_filter(name, function):
     filter_registry[name] = HaloFilter(function)

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,10 +21,10 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.utilities.operator_registry import \
+     OperatorRegistry
 
-from .operator_registry import \
-    finding_method_registry
-
+finding_method_registry = OperatorRegistry()
 
 def add_finding_method(name, function):
     finding_method_registry[name] = HaloFindingMethod(function)

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/halo_quantities.py
--- a/yt/analysis_modules/halo_analysis/halo_quantities.py
+++ b/yt/analysis_modules/halo_analysis/halo_quantities.py
@@ -15,8 +15,12 @@
 
 import numpy as np
 
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
 from .halo_callbacks import HaloCallback
-from .operator_registry import quantity_registry
+
+quantity_registry = OperatorRegistry()
 
 def add_quantity(name, function):
     quantity_registry[name] = HaloQuantity(function)

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Operation registry class
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import copy
-import types
-
-class OperatorRegistry(dict):
-    def find(self, op, *args, **kwargs):
-        if isinstance(op, types.StringTypes):
-            # Lookup, assuming string or hashable object
-            op = copy.deepcopy(self[op])
-            op.args = args
-            op.kwargs = kwargs
-        return op
-
-callback_registry = OperatorRegistry()
-filter_registry = OperatorRegistry()
-finding_method_registry = OperatorRegistry()
-quantity_registry = OperatorRegistry()

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -21,12 +21,14 @@
     find_clumps, \
     get_lowest_clumps, \
     write_clump_index, \
-    write_clumps, \
-    write_old_clump_index, \
-    write_old_clumps, \
-    write_old_clump_info, \
-    _DistanceToMainClump
+    write_clumps
 
+from .clump_info_items import \
+    add_clump_info
+
+from .clump_validators import \
+    add_validator
+    
 from .clump_tools import \
     recursive_all_clumps, \
     return_all_clumps, \

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -13,17 +13,41 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import copy
 import numpy as np
-import copy
+import uuid
 
-from yt.funcs import *
+from .clump_info_items import \
+     clump_info_registry
+from .clump_validators import \
+     clump_validator_registry
 
-from .contour_finder import identify_contours
+from .contour_finder import \
+     identify_contours
+
+from yt.fields.derived_field import \
+    ValidateSpatial
+
+def add_contour_field(ds, contour_key):
+    def _contours(field, data):
+        fd = data.get_field_parameter("contour_slices_%s" % contour_key)
+        vals = data["index", "ones"] * -1
+        if fd is None or fd == 0.0:
+            return vals
+        for sl, v in fd.get(data.id, []):
+            vals[sl] = v
+        return vals
+
+    ds.add_field(("index", "contours_%s" % contour_key),
+                 function=_contours,
+                 validators=[ValidateSpatial(0)],
+                 take_log=False,
+                 display_field=False)
 
 class Clump(object):
     children = None
     def __init__(self, data, parent, field, cached_fields = None, 
-                 function=None, clump_info=None):
+                 clump_info=None, validators=None):
         self.parent = parent
         self.data = data
         self.quantities = data.quantities
@@ -40,23 +64,31 @@
             # Clump info will act the same if add_info_item is called before or after clump finding.
             self.clump_info = copy.deepcopy(clump_info)
 
-        # Function determining whether a clump is valid and should be kept.
-        self.default_function = 'self.data.quantities["IsBound"](truncate=True,include_thermal_energy=True) > 1.0'
-        if function is None:
-            self.function = self.default_function
-        else:
-            self.function = function
+        if validators is None:
+            validators = []
+        self.validators = validators
+        # Return value of validity function.
+        self.valid = None
 
-        # Return value of validity function, saved so it does not have to be calculated again.
-        self.function_value = None
-
-    def add_info_item(self,quantity,format):
+    def add_validator(self, validator, *args, **kwargs):
+        """
+        Add a validating function to determine whether the clump should 
+        be kept.
+        """
+        callback = clump_validator_registry.find(validator, *args, **kwargs)
+        self.validators.append(callback)
+        if self.children is None: return
+        for child in self.children:
+            child.add_validator(validator)
+        
+    def add_info_item(self, info_item, *args, **kwargs):
         "Adds an entry to clump_info list and tells children to do the same."
 
-        self.clump_info.append({'quantity':quantity, 'format':format})
+        callback = clump_info_registry.find(info_item, *args, **kwargs)
+        self.clump_info.append(callback)
         if self.children is None: return
         for child in self.children:
-            child.add_info_item(quantity,format)
+            child.add_info_item(info_item)
 
     def set_default_clump_info(self):
         "Defines default entries in the clump_info array."
@@ -64,22 +96,13 @@
         # add_info_item is recursive so this function does not need to be.
         self.clump_info = []
 
-        # Number of cells.
-        self.add_info_item('self.data["CellMassMsun"].size','"Cells: %d" % value')
-        # Gas mass in solar masses.
-        self.add_info_item('self.data["CellMassMsun"].sum()','"Mass: %e Msolar" % value')
-        # Volume-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")',
-                           '"Jeans Mass (vol-weighted): %.6e Msolar" % value')
-        # Mass-weighted Jeans mass.
-        self.add_info_item('self.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")',
-                           '"Jeans Mass (mass-weighted): %.6e Msolar" % value')
-        # Max level.
-        self.add_info_item('self.data["GridLevel"].max()','"Max grid level: %d" % value')
-        # Minimum number density.
-        self.add_info_item('self.data["NumberDensity"].min()','"Min number density: %.6e cm^-3" % value')
-        # Maximum number density.
-        self.add_info_item('self.data["NumberDensity"].max()','"Max number density: %.6e cm^-3" % value')
+        self.add_info_item("total_cells")
+        self.add_info_item("cell_mass")
+        self.add_info_item("mass_weighted_jeans_mass")
+        self.add_info_item("volume_weighted_jeans_mass")
+        self.add_info_item("max_grid_level")
+        self.add_info_item("min_number_density")
+        self.add_info_item("max_number_density")
 
     def clear_clump_info(self):
         "Clears the clump_info array and passes the instruction to its children."
@@ -89,31 +112,40 @@
         for child in self.children:
             child.clear_clump_info()
 
-    def write_info(self,level,f_ptr):
+    def write_info(self, level, f_ptr):
         "Writes information for clump using the list of items in clump_info."
 
         for item in self.clump_info:
-            # Call if callable, otherwise do an eval.
-            if callable(item['quantity']):
-                value = item['quantity']()
-            else:
-                value = eval(item['quantity'])
-            output = eval(item['format'])
-            f_ptr.write("%s%s" % ('\t'*level,output))
-            f_ptr.write("\n")
+            value = item(self)
+            f_ptr.write("%s%s\n" % ('\t'*level, value))
 
     def find_children(self, min_val, max_val = None):
         if self.children is not None:
-            print "Wiping out existing children clumps."
+            print "Wiping out existing children clumps.", len(self.children)
         self.children = []
         if max_val is None: max_val = self.max_val
         nj, cids = identify_contours(self.data, self.field, min_val, max_val)
-        for cid in range(nj):
-            new_clump = self.data.cut_region(
-                    ["obj['contours'] == %s" % (cid + 1)],
-                    {'contour_slices': cids})
+        # Here, cids is the set of slices and values, keyed by the
+        # parent_grid_id, that defines the contours.  So we can figure out all
+        # the unique values of the contours by examining the list here.
+        unique_contours = set([])
+        for sl_list in cids.values():
+            for sl, ff in sl_list:
+                unique_contours.update(np.unique(ff))
+        contour_key = uuid.uuid4().hex
+        base_object = getattr(self.data, 'base_object', self.data)
+        add_contour_field(base_object.pf, contour_key)
+        for cid in sorted(unique_contours):
+            if cid == -1: continue
+            new_clump = base_object.cut_region(
+                    ["obj['contours_%s'] == %s" % (contour_key, cid)],
+                    {('contour_slices_%s' % contour_key): cids})
+            if new_clump["ones"].size == 0:
+                # This is to skip possibly duplicate clumps.  Using "ones" here
+                # will speed things up.
+                continue
             self.children.append(Clump(new_clump, self, self.field,
-                                       self.cached_fields,function=self.function,
+                                       self.cached_fields,validators=self.validators,
                                        clump_info=self.clump_info))
 
     def pass_down(self,operation):
@@ -129,24 +161,30 @@
         for child in self.children:
             child.pass_down(operation)
 
-    def _isValid(self):
-        "Perform user specified function to determine if child clumps should be kept."
+    def _validate(self):
+        "Apply all user specified validator functions."
 
-        # Only call function if it has not been already.
-        if self.function_value is None:
-            self.function_value = eval(self.function)
+        # Only call functions if not done already.
+        if self.valid is not None:
+            return self.valid
 
-        return self.function_value
+        self.valid = True
+        for validator in self.validators:
+            self.valid &= validator(self)
+            if not self.valid:
+                break
+
+        return self.valid
 
     def __reduce__(self):
         return (_reconstruct_clump, 
                 (self.parent, self.field, self.min_val, self.max_val,
-                 self.function_value, self.children, self.data, self.clump_info, self.function))
+                 self.valid, self.children, self.data, self.clump_info, self.function))
 
     def __getitem__(self,request):
         return self.data[request]
 
-def _reconstruct_clump(parent, field, mi, ma, function_value, children, data, clump_info, 
+def _reconstruct_clump(parent, field, mi, ma, valid, children, data, clump_info, 
         function=None):
     obj = object.__new__(Clump)
     if iterable(parent):
@@ -155,8 +193,8 @@
         except KeyError:
             parent = parent
     if children is None: children = []
-    obj.parent, obj.field, obj.min_val, obj.max_val, obj.function_value, obj.children, obj.clump_info, obj.function = \
-        parent, field, mi, ma, function_value, children, clump_info, function
+    obj.parent, obj.field, obj.min_val, obj.max_val, obj.valid, obj.children, obj.clump_info, obj.function = \
+        parent, field, mi, ma, valid, children, clump_info, function
     # Now we override, because the parent/child relationship seems a bit
     # unreliable in the unpickling
     for child in children: child.parent = obj
@@ -180,10 +218,10 @@
             find_clumps(child, min_val*d_clump, max_val, d_clump)
             if ((child.children is not None) and (len(child.children) > 0)):
                 these_children.append(child)
-            elif (child._isValid()):
+            elif (child._validate()):
                 these_children.append(child)
             else:
-                print "Eliminating invalid, childless clump with %d cells." % len(child.data["Ones"])
+                print "Eliminating invalid, childless clump with %d cells." % len(child.data["ones"])
         if (len(these_children) > 1):
             print "%d of %d children survived." % (len(these_children),len(clump.children))            
             clump.children = these_children
@@ -206,88 +244,35 @@
 
     return clump_list
 
-def write_clump_index(clump,level,f_ptr):
+def write_clump_index(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
+        fh.write("\t")
+    fh.write("Clump at level %d:\n" % level)
+    clump.write_info(level, fh)
+    fh.write("\n")
+    fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
+            write_clump_index(child, (level+1), fh)
+    if top:
+        fh.close()
 
-def write_clumps(clump,level,f_ptr):
+def write_clumps(clump, level, fh):
+    top = False
+    if not isinstance(fh, file):
+        fh = open(fh, "w")
+        top = True
     if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        clump.write_info(level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
+        fh.write("%sClump:\n" % ("\t"*level))
+        clump.write_info(level, fh)
+        fh.write("\n")
+        fh.flush()
     if ((clump.children is not None) and (len(clump.children) > 0)):
         for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-# Old clump info writing routines.
-def write_old_clump_index(clump,level,f_ptr):
-    for q in range(level):
-        f_ptr.write("\t")
-    f_ptr.write("Clump at level %d:\n" % level)
-    clump.write_info(level,f_ptr)
-    write_old_clump_info(clump,level,f_ptr)
-    f_ptr.write("\n")
-    f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clump_index(child,(level+1),f_ptr)
-
-def write_old_clumps(clump,level,f_ptr):
-    if ((clump.children is None) or (len(clump.children) == 0)):
-        f_ptr.write("%sClump:\n" % ("\t"*level))
-        write_old_clump_info(clump,level,f_ptr)
-        f_ptr.write("\n")
-        f_ptr.flush()
-    if ((clump.children is not None) and (len(clump.children) > 0)):
-        for child in clump.children:
-            write_clumps(child,0,f_ptr)
-
-__clump_info_template = \
-"""
-%(tl)sCells: %(num_cells)s
-%(tl)sMass: %(total_mass).6e Msolar
-%(tl)sJeans Mass (vol-weighted): %(jeans_mass_vol).6e Msolar
-%(tl)sJeans Mass (mass-weighted): %(jeans_mass_mass).6e Msolar
-%(tl)sMax grid level: %(max_level)s
-%(tl)sMin number density: %(min_density).6e cm^-3
-%(tl)sMax number density: %(max_density).6e cm^-3
-
-"""
-
-def write_old_clump_info(clump,level,f_ptr):
-    fmt_dict = {'tl':  "\t" * level}
-    fmt_dict['num_cells'] = clump.data["CellMassMsun"].size,
-    fmt_dict['total_mass'] = clump.data["CellMassMsun"].sum()
-    fmt_dict['jeans_mass_vol'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellVolume")
-    fmt_dict['jeans_mass_mass'] = clump.data.quantities["WeightedAverageQuantity"]("JeansMassMsun","CellMassMsun")
-    fmt_dict['max_level'] =  clump.data["GridLevel"].max()
-    fmt_dict['min_density'] =  clump.data["NumberDensity"].min()
-    fmt_dict['max_density'] =  clump.data["NumberDensity"].max()
-    f_ptr.write(__clump_info_template % fmt_dict)
-
-# Recipes for various clump calculations.
-recipes = {}
-
-# Distance from clump center of mass to center of mass of top level object.
-def _DistanceToMainClump(master,units='pc'):
-    masterCOM = master.data.quantities['CenterOfMass']()
-    pass_command = "self.masterCOM = [%.10f, %.10f, %.10f]" % (masterCOM[0],
-                                                               masterCOM[1],
-                                                               masterCOM[2])
-    master.pass_down(pass_command)
-    master.pass_down("self.com = self.data.quantities['CenterOfMass']()")
-
-    quantity = "((self.com[0]-self.masterCOM[0])**2 + (self.com[1]-self.masterCOM[1])**2 + (self.com[2]-self.masterCOM[2])**2)**(0.5)*self.data.ds.units['%s']" % units
-    format = "%s%s%s" % ("'Distance from center: %.6e ",units,"' % value")
-
-    master.add_info_item(quantity,format)
-
-recipes['DistanceToMainClump'] = _DistanceToMainClump
+            write_clumps(child, 0, fh)
+    if top:
+        fh.close()
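
Since info items and validators are now registered callables rather than
eval'd strings, they attach to a clump by name; roughly, given a
master_clump built as in the recipe above:

    master_clump.clear_clump_info()
    master_clump.add_info_item("total_cells")
    master_clump.add_info_item("cell_mass")
    # Info items may take arguments, e.g. the output units here:
    master_clump.add_info_item("distance_to_main_clump", units="pc")
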

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/level_sets/clump_info_items.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_info_items.py
@@ -0,0 +1,87 @@
+"""
+ClumpInfoCallback and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.operator_registry import \
+     OperatorRegistry
+
+clump_info_registry = OperatorRegistry()
+
+def add_clump_info(name, function):
+    clump_info_registry[name] = ClumpInfoCallback(function)
+
+class ClumpInfoCallback(object):
+    r"""
+    A ClumpInfoCallback is a function that takes a clump, computes a 
+    quantity, and returns a string to be printed out for writing clump info.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _total_cells(clump):
+    n_cells = clump.data["index", "ones"].size
+    return "Cells: %d." % n_cells
+add_clump_info("total_cells", _total_cells)
+
+def _cell_mass(clump):
+    cell_mass = clump.data["gas", "cell_mass"].sum().in_units("Msun")
+    return "Mass: %e Msun." % cell_mass
+add_clump_info("cell_mass", _cell_mass)
+
+def _mass_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+    return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+def _volume_weighted_jeans_mass(clump):
+    jeans_mass = clump.data.quantities.weighted_average_quantity(
+        "jeans_mass", ("index", "cell_volume")).in_units("Msun")
+    return "Jeans Mass (volume-weighted): %.6e Msolar." % jeans_mass
+add_clump_info("volume_weighted_jeans_mass", _volume_weighted_jeans_mass)
+
+def _max_grid_level(clump):
+    max_level = clump.data["index", "grid_level"].max()
+    return "Max grid level: %d." % max_level
+add_clump_info("max_grid_level", _max_grid_level)
+
+def _min_number_density(clump):
+    min_n = clump.data["gas", "number_density"].min().in_units("cm**-3")
+    return "Min number density: %.6e cm^-3." % min_n
+add_clump_info("min_number_density", _min_number_density)
+
+def _max_number_density(clump):
+    max_n = clump.data["gas", "number_density"].max().in_units("cm**-3")
+    return "Max number density: %.6e cm^-3." % max_n
+add_clump_info("max_number_density", _max_number_density)
+
+def _distance_to_main_clump(clump, units="pc"):
+    master = clump
+    while master.parent is not None:
+        master = master.parent
+    master_com = clump.data.ds.arr(master.data.quantities.center_of_mass())
+    my_com = clump.data.ds.arr(clump.data.quantities.center_of_mass())
+    distance = np.sqrt(((master_com - my_com)**2).sum())
+    return "Distance from master center of mass: %.6e %s." % \
+      (distance.in_units(units), units)
+add_clump_info("distance_to_main_clump", _distance_to_main_clump)
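
A user-defined info item follows the same pattern as the built-ins above;
for example (a hypothetical item, not part of this commit):

    from yt.analysis_modules.level_sets.clump_info_items import \
         add_clump_info

    def _max_temperature(clump):
        max_t = clump.data["gas", "temperature"].max()
        return "Max temperature: %.6e K." % max_t
    add_clump_info("max_temperature", _max_temperature)
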

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/level_sets/clump_validators.py
--- /dev/null
+++ b/yt/analysis_modules/level_sets/clump_validators.py
@@ -0,0 +1,95 @@
+"""
+ClumpValidators and callbacks.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.data_point_utilities import FindBindingEnergy
+from yt.utilities.operator_registry import \
+    OperatorRegistry
+from yt.utilities.physical_constants import \
+    gravitational_constant_cgs as G
+
+clump_validator_registry = OperatorRegistry()
+
+def add_validator(name, function):
+    clump_validator_registry[name] = ClumpValidator(function)
+
+class ClumpValidator(object):
+    r"""
+    A ClumpValidator is a function that takes a clump and returns 
+    True or False, indicating whether the clump is valid and should be kept.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, clump):
+        return self.function(clump, *self.args, **self.kwargs)
+    
+def _gravitationally_bound(clump, use_thermal_energy=True,
+                           use_particles=True, truncate=True):
+    "True if clump is gravitationally bound."
+
+    use_particles &= \
+      ("all", "particle_mass") in clump.data.ds.field_info
+    
+    bulk_velocity = clump.quantities.bulk_velocity(use_particles=use_particles)
+
+    kinetic = 0.5 * (clump["gas", "cell_mass"] *
+        ((bulk_velocity[0] - clump["gas", "velocity_x"])**2 +
+         (bulk_velocity[1] - clump["gas", "velocity_y"])**2 +
+         (bulk_velocity[2] - clump["gas", "velocity_z"])**2)).sum()
+
+    if use_thermal_energy:
+        kinetic += (clump["gas", "cell_mass"] *
+                    clump["gas", "thermal_energy"]).sum()
+
+    if use_particles:
+        kinetic += 0.5 * (clump["all", "particle_mass"] *
+            ((bulk_velocity[0] - clump["all", "particle_velocity_x"])**2 +
+             (bulk_velocity[1] - clump["all", "particle_velocity_y"])**2 +
+             (bulk_velocity[2] - clump["all", "particle_velocity_z"])**2)).sum()
+
+    potential = clump.data.ds.quan(G *
+        FindBindingEnergy(clump["gas", "cell_mass"].in_cgs(),
+                          clump["index", "x"].in_cgs(),
+                          clump["index", "y"].in_cgs(),
+                          clump["index", "z"].in_cgs(),
+                          truncate, (kinetic / G).in_cgs()),
+        kinetic.in_cgs().units)
+    
+    if truncate and potential >= kinetic:
+        return True
+
+    if use_particles:
+        potential += clump.data.ds.quan(G *
+            FindBindingEnergy(
+                clump["all", "particle_mass"].in_cgs(),
+                clump["all", "particle_position_x"].in_cgs(),
+                clump["all", "particle_position_y"].in_cgs(),
+                clump["all", "particle_position_z"].in_cgs(),
+                truncate, ((kinetic - potential) / G).in_cgs()),
+        kinetic.in_cgs().units)
+
+    return potential >= kinetic
+add_validator("gravitationally_bound", _gravitationally_bound)
+
+def _min_cells(clump, n_cells):
+    "True if clump has a minimum number of cells."
+    return (clump["index", "ones"].size >= n_cells)
+add_validator("min_cells", _min_cells)
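
Custom validators plug into the same registry; for example, a minimum gas
mass cut might look like this (hypothetical, not part of this commit):

    from yt.analysis_modules.level_sets.clump_validators import \
         add_validator

    def _min_mass(clump, min_mass):
        "True if the clump's total gas mass is at least min_mass."
        return clump["gas", "cell_mass"].sum() >= min_mass
    add_validator("min_mass", _min_mass)

    # Attached to a clump with, e.g.:
    # master_clump.add_validator("min_mass", ds.quan(1.0e4, "Msun"))
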

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -39,9 +39,9 @@
         node_ids.append(nid)
         values = g[field][sl].astype("float64")
         contour_ids = np.zeros(dims, "int64") - 1
-        gct.identify_contours(values, contour_ids, total_contours)
+        total_contours += gct.identify_contours(values, contour_ids,
+                                                total_contours)
         new_contours = tree.cull_candidates(contour_ids)
-        total_contours += new_contours.shape[0]
         tree.add_contours(new_contours)
         # Now we can create a partitioned grid with the contours.
         LE = (DLE + g.dds * gi).in_units("code_length").ndarray_view()
@@ -51,6 +51,8 @@
             LE, RE, dims.astype("int64"))
         contours[nid] = (g.Level, node.node_ind, pg, sl)
     node_ids = np.array(node_ids)
+    if node_ids.size == 0:
+        return 0, {}
     trunk = data_source.tiles.tree.trunk
     mylog.info("Linking node (%s) contours.", len(contours))
     link_node_contours(trunk, contours, tree, node_ids)

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -21,14 +21,12 @@
 
 from yt.config import ytcfg
 from yt.units.yt_array import YTArray, uconcatenate, array_like_field
-from yt.utilities.data_point_utilities import FindBindingEnergy
 from yt.utilities.exceptions import YTFieldNotFound
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_objects
 from yt.utilities.lib.Octree import Octree
 from yt.utilities.physical_constants import \
     gravitational_constant_cgs, \
-    mass_sun_cgs, \
     HUGE
 from yt.utilities.math_utils import prec_accum
 
@@ -237,14 +235,14 @@
           (("all", "particle_mass") in self.data_source.ds.field_info)
         vals = []
         if use_gas:
-            vals += [(data[ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data[ax] * data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_position_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_position_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -261,7 +259,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class BulkVelocity(DerivedQuantity):
     r"""
@@ -299,14 +297,15 @@
     def process_chunk(self, data, use_gas = True, use_particles = False):
         vals = []
         if use_gas:
-            vals += [(data["velocity_%s" % ax] * data["cell_mass"]).sum(dtype=np.float64)
+            vals += [(data["gas", "velocity_%s" % ax] * 
+                      data["gas", "cell_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["cell_mass"].sum(dtype=np.float64))
+            vals.append(data["gas", "cell_mass"].sum(dtype=np.float64))
         if use_particles:
-            vals += [(data["particle_velocity_%s" % ax] *
-                      data["particle_mass"]).sum(dtype=np.float64)
+            vals += [(data["all", "particle_velocity_%s" % ax] *
+                      data["all", "particle_mass"]).sum(dtype=np.float64)
                      for ax in 'xyz']
-            vals.append(data["particle_mass"].sum(dtype=np.float64))
+            vals.append(data["all", "particle_mass"].sum(dtype=np.float64))
         return vals
 
     def reduce_intermediate(self, values):
@@ -323,7 +322,7 @@
             y += values.pop(0).sum(dtype=np.float64)
             z += values.pop(0).sum(dtype=np.float64)
             w += values.pop(0).sum(dtype=np.float64)
-        return [v/w for v in [x, y, z]]
+        return self.data_source.ds.arr([v/w for v in [x, y, z]])
 
 class WeightedVariance(DerivedQuantity):
     r"""
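
With the results wrapped in ds.arr, center_of_mass and bulk_velocity now
return unitful YTArrays rather than plain lists; a quick sketch of the
effect:

    ad = ds.all_data()
    com = ad.quantities.center_of_mass()
    # com is a YTArray, so unit conversion works directly:
    print com.in_units("kpc")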

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -16,6 +16,7 @@
 
 import types
 import numpy as np
+from contextlib import contextmanager
 
 from yt.funcs import *
 from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
@@ -718,6 +719,22 @@
             self.field_data[field] = self.base_object[field][ind]
 
     @property
+    def blocks(self):
+        # We have to take a slightly different approach here.  Note that all
+        # that .blocks has to yield is a 3D array and a mask.
+        for obj, m in self.base_object.blocks:
+            m = m.copy()
+            with obj._field_parameter_state(self.field_parameters):
+                for cond in self.conditionals:
+                    ss = eval(cond)
+                    m = np.logical_and(m, ss, m)
+            if not np.any(m): continue
+            yield obj, m
+
+    def cut_region(self, *args, **kwargs):
+        raise NotImplementedError
+
+    @property
     def _cond_ind(self):
         ind = None
         obj = self.base_object
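
The new blocks property lets a cut region participate in block-wise
iteration like other 3D containers, yielding each base block with a mask
that folds in the region's conditionals; roughly:

    cr = ds.all_data().cut_region(["obj['density'] > 1e-28"])
    for block, mask in cr.blocks:
        # mask is True only where the base selection and all
        # conditionals hold within this block.
        print block, mask.sum()
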

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -22,10 +22,12 @@
         yield assert_equal, np.all(r["velocity_x"] > 0.25), True
         yield assert_equal, np.sort(dd["density"][t]), np.sort(r["density"])
         yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
-        r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
-        t2 = (r["temperature"] < 0.75)
-        yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
-        yield assert_equal, np.all(r2["temperature"] < 0.75), True
+        # We are disabling these, as cutting cut regions does not presently
+        # work
+        #r2 = r.cut_region( [ "obj['temperature'] < 0.75" ] )
+        #t2 = (r["temperature"] < 0.75)
+        #yield assert_equal, np.sort(r2["temperature"]), np.sort(r["temperature"][t2])
+        #yield assert_equal, np.all(r2["temperature"] < 0.75), True
 
         # Now we can test some projections
         dd = ds.all_data()

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/fields/geometric_fields.py
--- a/yt/fields/geometric_fields.py
+++ b/yt/fields/geometric_fields.py
@@ -207,18 +207,3 @@
              units="cm",
              display_field=False)
 
-    def _contours(field, data):
-        fd = data.get_field_parameter("contour_slices")
-        vals = data["index", "ones"] * -1
-        if fd is None or fd == 0.0:
-            return vals
-        for sl, v in fd.get(data.id, []):
-            vals[sl] = v
-        return vals
-    
-    registry.add_field(("index", "contours"),
-                       function=_contours,
-                       validators=[ValidateSpatial(0)],
-                       take_log=False,
-                       display_field=False)
-

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/utilities/lib/ContourFinding.pyx
--- a/yt/utilities/lib/ContourFinding.pyx
+++ b/yt/utilities/lib/ContourFinding.pyx
@@ -228,7 +228,7 @@
         cdef int i, n, ins
         cdef np.int64_t cid1, cid2
         # Okay, this requires lots of iteration, unfortunately
-        cdef ContourID *cur, *root
+        cdef ContourID *cur, *c1, *c2
         n = join_tree.shape[0]
         #print "Counting"
         #print "Checking", self.count()
@@ -253,6 +253,7 @@
                 print "  Inspected ", ins
                 raise RuntimeError
             else:
+                c1.count = c2.count = 0
                 contour_union(c1, c2)
 
     def count(self):
@@ -335,6 +336,7 @@
                                 c2 = container[offset]
                                 if c2 == NULL: continue
                                 c2 = contour_find(c2)
+                                cur.count = c2.count = 0
                                 contour_union(cur, c2)
                                 cur = contour_find(cur)
         for i in range(ni):
@@ -342,13 +344,13 @@
                 for k in range(nk):
                     c1 = container[i*nj*nk + j*nk + k]
                     if c1 == NULL: continue
-                    cur = c1
                     c1 = contour_find(c1)
                     contour_ids[i,j,k] = c1.contour_id
         
         for i in range(ni*nj*nk): 
             if container[i] != NULL: free(container[i])
         free(container)
+        return nc
 
 @cython.boundscheck(False)
 @cython.wraparound(False)
@@ -383,6 +385,7 @@
         if spos[i] <= vc.left_edge[i] or spos[i] >= vc.right_edge[i]: return 0
     return 1
 
+@cython.cdivision(True)
 @cython.boundscheck(False)
 @cython.wraparound(False)
 cdef void construct_boundary_relationships(Node trunk, ContourTree tree, 
@@ -391,227 +394,68 @@
                 np.ndarray[np.int64_t, ndim=1] node_ids):
     # We only look at the boundary and find the nodes next to it.
     # Contours is a dict, keyed by the node.id.
-    cdef int i, j, nx, ny, nz, offset_i, offset_j, oi, oj, level
+    cdef int i, j, off_i, off_j, oi, oj, level, ax, ax0, ax1, n1, n2
     cdef np.int64_t c1, c2
     cdef Node adj_node
     cdef VolumeContainer *vc1, *vc0 = vcs[nid]
-    nx = vc0.dims[0]
-    ny = vc0.dims[1]
-    nz = vc0.dims[2]
-    cdef int s = (ny*nx + nx*nz + ny*nz) * 18
+    cdef int s = (vc0.dims[1]*vc0.dims[0]
+                + vc0.dims[0]*vc0.dims[2]
+                + vc0.dims[1]*vc0.dims[2]) * 18
     # We allocate an array of fixed (maximum) size
     cdef np.ndarray[np.int64_t, ndim=2] joins = np.zeros((s, 2), dtype="int64")
-    cdef int ti = 0
-    cdef int index
+    cdef int ti = 0, side
+    cdef int index, pos[3], my_pos[3]
     cdef np.float64_t spos[3]
 
-    # First the x-pass
-    for i in range(ny):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    # Adjust by -1 in x, then oi and oj in y and z
-                    get_spos(vc0, -1, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, 0, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-                    # This is outside our vc
-                    get_spos(vc0, nx, i + oi, j + oj, 0, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, nx - 1, i, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-    # Now y-pass
-    for i in range(nx):
-        for j in range(nz):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                if i == 0 and oi == -1: continue
-                if i == nx - 1 and oi == 1: continue
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    get_spos(vc0, i + oi, -1, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, 0, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
+    for ax in range(3):
+        ax0 = (ax + 1) % 3
+        ax1 = (ax + 2) % 3
+        n1 = vc0.dims[ax0]
+        n2 = vc0.dims[ax1]
+        for i in range(n1):
+            for j in range(n2):
+                for off_i in range(3):
+                    oi = off_i - 1
+                    if i == 0 and oi == -1: continue
+                    if i == n1 - 1 and oi == 1: continue
+                    for off_j in range(3):
+                        oj = off_j - 1
+                        if j == 0 and oj == -1: continue
+                        if j == n2 - 1 and oj == 1: continue
+                        pos[ax0] = i + oi
+                        pos[ax1] = j + oj
+                        my_pos[ax0] = i
+                        my_pos[ax1] = j
+                        for side in range(2):
+                            # We go off each end of the block.
+                            if side == 0:
+                                pos[ax] = -1
+                                my_pos[ax] = 0
+                            else:
+                                pos[ax] = vc0.dims[ax]
+                                my_pos[ax] = vc0.dims[ax]-1
+                            get_spos(vc0, pos[0], pos[1], pos[2], ax, spos)
+                            adj_node = _find_node(trunk, spos)
+                            vc1 = vcs[adj_node.node_ind]
+                            if spos_contained(vc1, spos):
+                                index = vc_index(vc0, my_pos[0], 
+                                                 my_pos[1], my_pos[2])
+                                c1 = (<np.int64_t*>vc0.data[0])[index]
+                                index = vc_pos_index(vc1, spos)
+                                c2 = (<np.int64_t*>vc1.data[0])[index]
+                                if c1 > -1 and c2 > -1:
+                                    if examined[adj_node.node_ind] == 0:
+                                        joins[ti,0] = i64max(c1,c2)
+                                        joins[ti,1] = i64min(c1,c2)
+                                    else:
+                                        joins[ti,0] = c1
+                                        joins[ti,1] = c2
+                                    ti += 1
 
-                    get_spos(vc0, i + oi, ny, j + oj, 1, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, ny - 1, j)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-
-    # Now z-pass
-    for i in range(nx):
-        for j in range(ny):
-            for offset_i in range(3):
-                oi = offset_i - 1
-                for offset_j in range(3):
-                    oj = offset_j - 1
-                    get_spos(vc0, i + oi,  j + oj, -1, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, 0)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
-
-                    get_spos(vc0, i + oi, j + oj, nz, 2, spos)
-                    adj_node = _find_node(trunk, spos)
-                    vc1 = vcs[adj_node.node_ind]
-                    if examined[adj_node.node_ind] == 0 and \
-                       spos_contained(vc1, spos):
-                        # This is outside our VC, as 0 is a boundary layer
-                        index = vc_index(vc0, i, j, nz - 1)
-                        c1 = (<np.int64_t*>vc0.data[0])[index]
-                        index = vc_pos_index(vc1, spos)
-                        c2 = (<np.int64_t*>vc1.data[0])[index]
-                        if c1 > -1 and c2 > -1:
-                            joins[ti,0] = i64max(c1,c2)
-                            joins[ti,1] = i64min(c1,c2)
-                            ti += 1
     if ti == 0: return
     new_joins = tree.cull_joins(joins[:ti,:])
     tree.add_joins(new_joins)
 
-cdef inline int are_neighbors(
-            np.float64_t x1, np.float64_t y1, np.float64_t z1,
-            np.float64_t dx1, np.float64_t dy1, np.float64_t dz1,
-            np.float64_t x2, np.float64_t y2, np.float64_t z2,
-            np.float64_t dx2, np.float64_t dy2, np.float64_t dz2,
-        ):
-    # We assume an epsilon of 1e-15
-    if fabs(x1-x2) > 0.5*(dx1+dx2): return 0
-    if fabs(y1-y2) > 0.5*(dy1+dy2): return 0
-    if fabs(z1-z2) > 0.5*(dz1+dz2): return 0
-    return 1
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def identify_field_neighbors(
-            np.ndarray[dtype=np.float64_t, ndim=1] field,
-            np.ndarray[dtype=np.float64_t, ndim=1] x,
-            np.ndarray[dtype=np.float64_t, ndim=1] y,
-            np.ndarray[dtype=np.float64_t, ndim=1] z,
-            np.ndarray[dtype=np.float64_t, ndim=1] dx,
-            np.ndarray[dtype=np.float64_t, ndim=1] dy,
-            np.ndarray[dtype=np.float64_t, ndim=1] dz,
-        ):
-    # We assume this field is pre-jittered; it has no identical values.
-    cdef int outer, inner, N, added
-    cdef np.float64_t x1, y1, z1, dx1, dy1, dz1
-    N = field.shape[0]
-    #cdef np.ndarray[dtype=np.object_t] joins
-    joins = [[] for outer in range(N)]
-    #joins = np.empty(N, dtype='object')
-    for outer in range(N):
-        if (outer % 10000) == 0: print outer, N
-        x1 = x[outer]
-        y1 = y[outer]
-        z1 = z[outer]
-        dx1 = dx[outer]
-        dy1 = dy[outer]
-        dz1 = dz[outer]
-        this_joins = joins[outer]
-        added = 0
-        # Go in reverse order
-        for inner in range(outer, 0, -1):
-            if not are_neighbors(x1, y1, z1, dx1, dy1, dz1,
-                                 x[inner], y[inner], z[inner],
-                                 dx[inner], dy[inner], dz[inner]):
-                continue
-            # Hot dog, we have a weiner!
-            this_joins.append(inner)
-            added += 1
-            if added == 26: break
-    return joins
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def extract_identified_contours(int max_ind, joins):
-    cdef int i
-    contours = []
-    for i in range(max_ind + 1): # +1 to get to the max_ind itself
-        contours.append(set([i]))
-        if len(joins[i]) == 0:
-            continue
-        proto_contour = [i]
-        for j in joins[i]:
-            proto_contour += contours[j]
-        proto_contour = set(proto_contour)
-        for j in proto_contour:
-            contours[j] = proto_contour
-    return contours
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def update_flat_joins(np.ndarray[np.int64_t, ndim=2] joins,
-                 np.ndarray[np.int64_t, ndim=1] contour_ids,
-                 np.ndarray[np.int64_t, ndim=1] final_joins):
-    cdef np.int64_t new, old
-    cdef int i, j, nj, nf, counter
-    cdef int ci, cj, ck
-    nj = joins.shape[0]
-    nf = final_joins.shape[0]
-    for ci in range(contour_ids.shape[0]):
-        if contour_ids[ci] == -1: continue
-        for j in range(nj):
-            if contour_ids[ci] == joins[j,0]:
-                contour_ids[ci] = joins[j,1]
-                break
-        for j in range(nf):
-            if contour_ids[ci] == final_joins[j]:
-                contour_ids[ci] = j + 1
-                break
-
-
 @cython.boundscheck(False)
 @cython.wraparound(False)
 def update_joins(np.ndarray[np.int64_t, ndim=2] joins,

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/utilities/operator_registry.py
--- /dev/null
+++ b/yt/utilities/operator_registry.py
@@ -0,0 +1,26 @@
+"""
+Operation registry class
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import copy
+import types
+
+class OperatorRegistry(dict):
+    def find(self, op, *args, **kwargs):
+        if isinstance(op, types.StringTypes):
+            # Lookup, assuming string or hashable object
+            op = copy.deepcopy(self[op])
+            op.args = args
+            op.kwargs = kwargs
+        return op
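+
+# A minimal usage sketch (illustrative only; "MyOperator" is hypothetical):
+#
+#   registry = OperatorRegistry()
+#   registry["my_op"] = MyOperator()
+#   op = registry.find("my_op", 1.0, units="Msun")
+#
+# find() deep-copies the stored operator and attaches the positional and
+# keyword arguments to it as op.args and op.kwargs for later invocation.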

diff -r c2a973002c3710c762dac708242b457ee4526114 -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -689,20 +689,20 @@
         nx, ny = plot.image._A.shape
         buff = np.zeros((nx,ny),dtype='float64')
         for i,clump in enumerate(reversed(self.clumps)):
-            mylog.debug("Pixelizing contour %s", i)
+            mylog.info("Pixelizing contour %s", i)
 
-            xf_copy = clump[xf].copy()
-            yf_copy = clump[yf].copy()
+            xf_copy = clump[xf].copy().in_units("code_length")
+            yf_copy = clump[yf].copy().in_units("code_length")
 
             temp = _MPL.Pixelize(xf_copy, yf_copy,
-                                 clump[dxf]/2.0,
-                                 clump[dyf]/2.0,
-                                 clump[dxf]*0.0+i+1, # inits inside Pixelize
+                                 clump[dxf].in_units("code_length")/2.0,
+                                 clump[dyf].in_units("code_length")/2.0,
+                                 clump[dxf].d*0.0+i+1, # inits inside Pixelize
                                  int(nx), int(ny),
                              (x0, x1, y0, y1), 0).transpose()
             buff = np.maximum(temp, buff)
         self.rv = plot._axes.contour(buff, np.unique(buff),
-                                     extent=extent,**self.plot_args)
+                                     extent=extent, **self.plot_args)
         plot._axes.hold(False)
 
 class ArrowCallback(PlotCallback):


https://bitbucket.org/yt_analysis/yt/commits/15dcf293ad7d/
Changeset:   15dcf293ad7d
Branch:      yt-3.0
User:        chummels
Date:        2014-07-23 01:57:16
Summary:     Fixing a header issue in the cookbook
Affected #:  1 file

diff -r 78ee86070249629bcdab9cfa9f08b37aaf00d736 -r 15dcf293ad7d4a7eed4c5b365f46425ea09c8d87 doc/source/cookbook/constructing_data_objects.rst
--- a/doc/source/cookbook/constructing_data_objects.rst
+++ b/doc/source/cookbook/constructing_data_objects.rst
@@ -5,7 +5,7 @@
 from a simulation.
 
 Creating Particle Filters
--------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Create particle filters based on the age of star particles in an isolated
 disk galaxy simulation.  Determine the total mass of each stellar age bin


https://bitbucket.org/yt_analysis/yt/commits/e3e7b69f36e4/
Changeset:   e3e7b69f36e4
Branch:      yt-3.0
User:        chummels
Date:        2014-07-26 02:30:22
Summary:     First checkin of objects docs.
Affected #:  1 file

diff -r 15dcf293ad7d4a7eed4c5b365f46425ea09c8d87 -r e3e7b69f36e45149c08526ae54e80dd073769a44 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,119 +1,143 @@
-.. _using-objects:
+.. _using-objects:
 
-Using and Manipulating Objects and Fields
-=========================================
+Data Objects
+============
+
+What are Data Objects in yt?
+----------------------------
+
+Data objects (also called *Data Containers*) are used in yt as convenience 
+structures for grouping data in logical ways that make sense in the context 
+of the dataset as a whole.  Some of the data objects are geometrical groupings 
+of data (e.g. sphere, region--a 3D box, cylinder, etc.).  Others represent 
+data products derived from your dataset (e.g. slices, streamlines, surfaces).
+Still other data objects group multiple objects together or filter them
+(e.g. data collection, cut region).
 
 To generate standard plots, objects rarely need to be directly constructed.
 However, for detailed data inspection as well as hand-crafted derived data,
 objects can be exceptionally useful and even necessary.
 
-.. _types_of_fields:
+For geometric objects, if the shape intersectsXXX
 
-What Types of Fields are There?
--------------------------------
+How to Create an Object
+-----------------------
 
-``yt`` makes a distinction between two types of fields.
-
- * Fields it might expect to find on disk
- * Fields it has to generate in memory
-
-With the 2.3 release of ``yt``, the distinction between these has become more
-clear.  This enables much better specification of which fields are expected to
-exist, and to provide fallbacks for calculating them.  For instance you can now
-say, "temperature" might exist, but if it doesn't, here's how you calculate it.
-This also provides easier means of translating fields between different
-frontends.  For instance, FLASH may refer to the temperature field as "temp"
-while Enzo calls it "temperature".  Translator functions ensure that any
-derived field relying on "temp" or "temperature" works with both output types.
-
-When a field is requested, the dataset object first looks to see if that field
-exists on disk.  If it does not, it then queries the list of code-specific
-derived fields.  If it finds nothing there, it then defaults to examining the
-global set of derived fields.
-
-To add a derived field, which is not expected to necessarily exist on disk, use
-the standard construction:
+To create an object, you usually only need a loaded dataset, the name of 
+the object type, and the relevant parameters for your object.  Here is a common
+example for creating a ``Region`` object that covers all of your data volume.
 
 .. code-block:: python
 
-   add_field("specific_thermal_energy", function=_specific_thermal_energy,
-             units="ergs/g")
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   ad = ds.all_data()
 
-where ``_specific_thermal_energy`` is a python function that defines the field.
-
-.. _accessing-fields:
-
-Accessing Fields in Objects
----------------------------
-
-``yt`` utilizes load-on-demand objects to represent physical regions in space.
-(see :ref:`how-yt-thinks-about-data`.)  Data objects in ``yt`` all respect the following
-protocol for accessing data:
+Alternatively, we could create a sphere object of radius 1 kpc centered at
+[0.5, 0.5, 0.5], specifying the radius as a dataset quantity:
 
 .. code-block:: python
 
-   my_object["density"]
-
-where ``"density"`` can be any field name and ``"my_object"`` any one of
-the possible data containers listed at :ref:`available-objects`. For
-example, if we wanted to look at the temperature of cells within a
-spherical region of radius 10 kpc, centered at [0.5, 0.5, 0.5] in our
-simulation box, we would create a sphere object with:
-
-.. code-block:: python
-
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
-
-and then look at the temperature of its cells within it via:
-
-.. code-block:: python
-
-   print sp["temperature"]
-
-Information about how to create a new type of object can be found in
-:ref:`creating-objects`. The field is returned as a single, flattened
-array without spatial information.  The best mechanism for
-manipulating spatial data is the :class:`~yt.data_objects.data_containers.AMRCoveringGridBase` object.
-
-The full list of fields that are available can be found as a property of the
-Hierarchy or Static Output object that you wish to access.  This property is
-calculated every time the object is instantiated.  The full list of fields that
-have been identified in the output file, which need no processing (besides unit
-conversion) are in the property ``field_list`` and the full list of
-potentially-accessible derived fields is available in the property
-``derived_field_list``.  You can see these by examining the two properties:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_list
-   print ds.derived_field_list
-
-When a field is added, it is added to a container that hangs off of the
-dataset, as well.  All of the field creation options
-(:ref:`derived-field-options`) are accessible through this object:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_info["pressure"].get_units()
-
-This is a fast way to examine the units of a given field, and additionally you
-can use :meth:`yt.utilities.pydot.get_source` to get the source code:
-
-.. code-block:: python
-
-   field = ds.field_info["pressure"]
-   print field.get_source()
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], ds.quan(1, 'kpc'))
 
 .. _available-objects:
 
 Available Objects
 -----------------
 
-Objects are instantiated by direct access of a index.  Each of the objects
-that can be generated by a index are in fact fully-fledged data objects
-respecting the standard protocol for interaction.
+As noted above, there are numerous types of objects.  Here we group them
+into:
+
+* *Geometric Objects* - Data is selected based on spatial shapes in the dataset
+* *Filtering Objects* - Data is selected based on other field criteria
+* *Collection Objects* - Multiple objects grouped together
+* *Data Product Objects* - Data products derived from your dataset (e.g. projections, surfaces)
+
+Geometric Objects
+^^^^^^^^^^^^^^^^^
+
+0D
+""
+
+**Point** 
+    Aliased to :class:`~yt.data_objects.data_containers.YTPointBase`    
+    Usage: ``point(coords)``
+    A zero-dimensional point defined by a single cell at specified coordinates.
+
+1D
+""
+
+**Axis-Aligned Ray** (aliased to :class:`~yt.data_objects.data_containers.YTOrthoRayBase`)
+    | Usage: ``ortho_ray()``
+    | A one-dimensional line of data cells stretching through the full domain aligned with one of the x,y,z axes.
+
+**Arbitrary-Aligned Ray** (aliased to :class:`~yt.data_objects.data_containers.YTRayBase`)
+    | Usage: ``ray()``
+    | A one-dimensional line of data cells stretching through the full domain defined by arbitrary start and end coordinates.
+
+2D 
+""
+
+**Axis-Aligned Slice** (aliased to :class:`~yt.data_objects.data_containers.YTSliceBase`)
+    | Usage: ``slice()``
+
+**Arbitrary-Aligned Slice** (aliased to :class:`~yt.data_objects.data_containers.YTCuttingPlaneBase`)
+    | Usage: ``cutting()``
+
+3D
+""
+
+**Disk/Cylinder** (aliased to :class:`~yt.data_objects.data_containers.YTDiskBase`)
+    | Usage: ``disk()``
+
+**Box Region** (aliased to :class:`~yt.data_objects.data_containers.YTRegionBase`)
+    | Usage: ``region()``
+
+**Sphere** (aliased to :class:`~yt.data_objects.data_containers.YTSphereBase`)
+    | Usage: ``sphere()``
+
+**Ellipsoid** (aliased to :class:`~yt.data_objects.data_containers.YTEllipsoidBase`)
+    | Usage: ``ellipsoid()``
+
+**All Data** (aliased to :class:`~yt.data_objects.data_containers.YTRegionBase`)
+    | Usage: ``all_data()``
+
+Filtering and Grouping Objects
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Boolean Regions** (Note: not yet implemented in yt 3.0)
+    | Usage: ``boolean()``
+
+**Mesh Field Filter** (aliased to :class:`~yt.data_objects.data_containers.YTCutRegionBase`)
+    | Usage: ``cut_region()``
+
+**Collection of Data Objects** (aliased to :class:`~yt.data_objects.data_containers.YTDataCollectionBase`)
+    | Usage: ``data_collection()``
+
+Data Product Objects
+^^^^^^^^^^^^^^^^^^^^
+
+**Streamline** (aliased to :class:`~yt.data_objects.data_containers.YTStreamlineBase`)
+    | Usage: ``streamline()``
+
+**Projection** (aliased to :class:`~yt.data_objects.data_containers.YTQuadTreeProjBase`)
+    | Usage: ``proj()``
+
+**Fixed-Resolution Region** (aliased to :class:`~yt.data_objects.data_containers.YTCoveringGridBase`)
+    | Usage: ``covering_grid()``
+
+**Fixed-Resolution Region with Smoothing** (aliased to :class:`~yt.data_objects.data_containers.YTSmoothedCoveringGridBase`)
+    | Usage: ``smoothed_covering_grid()``
+
+**Fixed-Resolution Region for Particle Deposition** (aliased to :class:`~yt.data_objects.data_containers.YTArbitraryGridBase`)
+    | Usage: ``arbitrary_grid()``
+
+**Surface** (aliased to :class:`~yt.data_objects.data_containers.YTSurfaceBase`)
+    | Usage: ``surface()``
+
+
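+A quick sketch of instantiating a few of these objects (the coordinates,
+and the argument orders for the ray and slice, are illustrative):
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+
+   ray = ds.ortho_ray(0, (0.5, 0.5))   # x-axis ray through y = z = 0.5
+   slc = ds.slice(2, 0.25)             # slice normal to z at z = 0.25
+   dsk = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                 (10, 'kpc'), (1, 'kpc'))  # center, normal, radius, height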
 
 The following objects are available, all of which hang off of the index
 object.  To access them, you would do something like this (as for a
@@ -125,7 +149,6 @@
    ds = yt.load("RedshiftOutput0005")
    reg = ds.region([0.5, 0.5, 0.5], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
 
-.. include:: _obj_docstrings.inc
 
 .. _boolean_data_objects:
 
@@ -413,3 +436,69 @@
 This method works for clumps, as well, and the entire clump index will be
 stored and restored upon load.
 
+.. _accessing-fields:
+
+Accessing Fields in Objects
+---------------------------
+
+``yt`` utilizes load-on-demand objects to represent physical regions in space.
+(see :ref:`how-yt-thinks-about-data`.)  Data objects in ``yt`` all respect the following
+protocol for accessing data:
+
+.. code-block:: python
+
+   my_object["density"]
+
+where ``"density"`` can be any field name and ``"my_object"`` any one of
+the possible data containers listed at :ref:`available-objects`. For
+example, if we wanted to look at the temperature of cells within a
+spherical region of radius 10 kpc, centered at [0.5, 0.5, 0.5] in our
+simulation box, we would create a sphere object with:
+
+.. code-block:: python
+
+   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
+
+and then look at the temperature of its cells within it via:
+
+.. code-block:: python
+
+   print sp["temperature"]
+
+Information about how to create a new type of object can be found in
+:ref:`creating-objects`. The field is returned as a single, flattened
+array without spatial information.  The best mechanism for
+manipulating spatial data is the :class:`~yt.data_objects.data_containers.AMRCoveringGridBase` object.
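+
+For example, a minimal sketch of extracting a fixed-resolution region (the
+level, left edge, and dimensions here are illustrative):
+
+.. code-block:: python
+
+   cg = ds.covering_grid(level=2, left_edge=[0.0, 0.0, 0.0],
+                         dims=[128, 128, 128])
+   print cg["density"].shape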
+
+The full list of fields that are available can be found as a property of the
+Hierarchy or Static Output object that you wish to access.  This property is
+calculated every time the object is instantiated.  The full list of fields that
+have been identified in the output file, which need no processing (besides unit
+conversion) are in the property ``field_list`` and the full list of
+potentially-accessible derived fields is available in the property
+``derived_field_list``.  You can see these by examining the two properties:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print ds.field_list
+   print ds.derived_field_list
+
+When a field is added, it is added to a container that hangs off of the
+dataset, as well.  All of the field creation options
+(:ref:`derived-field-options`) are accessible through this object:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print ds.field_info["pressure"].get_units()
+
+This is a fast way to examine the units of a given field, and additionally you
+can use :meth:`yt.utilities.pydot.get_source` to get the source code:
+
+.. code-block:: python
+
+   field = ds.field_info["pressure"]
+   print field.get_source()
+
+


https://bitbucket.org/yt_analysis/yt/commits/fa87d397f01c/
Changeset:   fa87d397f01c
Branch:      yt-3.0
User:        chummels
Date:        2014-07-26 02:31:21
Summary:     Merging.
Affected #:  141 files

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -588,7 +588,7 @@
 FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.1.3'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.1.0'
+IPYTHON='ipython-2.1.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
@@ -616,7 +616,7 @@
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca  ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
+echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/_static/agogo_yt.css
--- a/doc/source/_static/agogo_yt.css
+++ /dev/null
@@ -1,41 +0,0 @@
- at import url("agogo.css");
- at import url("http://fonts.googleapis.com/css?family=Crimson+Text");
- at import url("http://fonts.googleapis.com/css?family=Droid+Sans");
-
-div.document ul {
-  margin-left: 1.5em;
-  margin-top: 0.0em;
-  margin-bottom: 1.0em;
-}
-
-div.document li.toctree-l1 {
-  margin-bottom: 0.5em;
-}
-
-table.contentstable {
-  width: 100%;
-}
-
-table.contentstable td {
-  padding: 5px 15px 0px 15px;
-}
-
-table.contentstable tr {
-  border-bottom: 1px solid black;
-}
-
-a.biglink {
-  line-height: 1.2em;
-}
-
-a tt.xref {
-  font-weight: bolder;
-}
-
-table.docutils {
-  width: 100%;
-}
-
-table.docutils td {
-  width: 50%;
-}

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/_static/custom.css
--- /dev/null
+++ b/doc/source/_static/custom.css
@@ -0,0 +1,8 @@
+blockquote {
+    font-size: 16px;
+    border-left: none;
+}
+
+dd {
+    margin-left: 30px;
+}
\ No newline at end of file

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/_templates/layout.html
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -35,3 +35,5 @@
     </div>
 {%- endblock %}
 
+{# Custom CSS overrides #}
+{% set bootswatch_css_custom = ['_static/custom.css'] %}

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e792ad188f59161aa3ff4cdbb32cad75142b2e6b4062dfa1d8c12b3172fcf4e9"
+  "signature": "sha256:c423bcb9e3370a4581cbaaa8e764b95ec13e665aa3b46d452891d76cc79d7acf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -296,7 +296,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_ds =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
       "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
@@ -391,12 +391,13 @@
      "input": [
       "%matplotlib inline\n",
       "import matplotlib.pyplot as plt\n",
+      "import numpy as np\n",
       "\n",
-      "plt.plot(radius, temperature)\n",
+      "plt.plot(np.array(radius), np.array(temperature))\n",
       "\n",
       "plt.semilogy()\n",
-      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
-      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n",
+      "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n",
       "\n",
       "plt.show()"
      ],

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -2,185 +2,135 @@
 
 Clump Finding
 =============
-.. sectionauthor:: Britton Smith <britton.smith at colorado.edu>
 
-``yt`` has the ability to identify topologically disconnected structures based in a dataset using 
-any field available.  This is powered by a contouring algorithm that runs in a recursive 
-fashion.  The user specifies the initial data object in which the clump-finding will occur, 
-the field over which the contouring will be done, the upper and lower limits of the 
-initial contour, and the contour increment.
+The clump finder uses a contouring algorithm to identify topologically
+disconnected structures within a dataset.  This works by first creating a 
+single contour over the full range of the contouring field, then continually 
+increasing the lower value of the contour until it reaches the maximum value 
+of the field.  As disconnected structures are identified as separate contours,
+the routine continues recursively through each object, creating a hierarchy of 
+clumps.  Individual clumps can be kept or removed from the hierarchy based on 
+the result of user-specified functions, such as checking for gravitational 
+boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder begins by creating a single contour of the specified field over the entire 
-range given.  For every isolated contour identified in the initial iteration, contouring is 
-repeated with the same upper limit as before, but with the lower limit increased by the 
-specified increment.  This repeated for every isolated group until the lower limit is equal 
-to the upper limit.
+The clump finder requires a data container and a field over which the 
+contouring is to be performed.
 
-Often very tiny clumps can appear as groups of only a few cells that happen to be slightly 
-overdense (if contouring over density) with respect to the surrounding gas.  The user may 
-specify criteria that clumps must meet in order to be kept.  The most obvious example is 
-selecting only those clumps that are gravitationally bound.
+.. code:: python
 
-Once the clump-finder has finished, the user can write out a set of quantities for each clump in the 
-index.  Additional info items can also be added.  We also provide a recipe
-for finding clumps in :ref:`cookbook-find_clumps`.
+   import yt
+   from yt.analysis_modules.level_sets.api import *
 
-Treecode Optimization
----------------------
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.1
+   data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                         (8, 'kpc'), (1, 'kpc'))
 
-As mentioned above, the user has the option to limit clumps to those that are
-gravitationally bound.
-The correct and accurate way to calculate if a clump is gravitationally
-bound is to do the full double sum:
+   master_clump = Clump(data_source, ("gas", "density"))
 
-.. math::
+At this point, every isolated contour will be considered a clump, 
+whether this is physical or not.  Validator functions can be added to 
+determine if an individual contour should be considered a real clump.  
+These functions are specified with the ``Clump.add_validator`` function.  
+Currently, two validators exist: a minimum number of cells and gravitational
+boundedness.
 
-  PE = \Sigma_{i=1}^N \Sigma_{j=i}^N \frac{G M_i M_j}{r_{ij}}
+.. code:: python
 
-where :math:`PE` is the gravitational potential energy of :math:`N` cells,
-:math:`G` is the
-gravitational constant, :math:`M_i` is the mass of cell :math:`i`, 
-and :math:`r_{ij}` is the distance
-between cell :math:`i` and :math:`j`.
-The number of calculations required for this calculation
-grows with the square of :math:`N`. Therefore, for large clumps with many cells, the
-test for boundedness can take a significant amount of time.
+   master_clump.add_validator("min_cells", 20)
 
-An effective way to greatly speed up this calculation with minimal error
-is to use the treecode approximation pioneered by
-`Barnes and Hut (1986) <http://adsabs.harvard.edu/abs/1986Natur.324..446B>`_.
-This method of calculating gravitational potentials works by
-grouping individual masses that are located close together into a larger conglomerated
-mass with a geometric size equal to the distribution of the individual masses.
-For a mass cell that is sufficiently distant from the conglomerated mass,
-the gravitational calculation can be made using the conglomerate, rather than
-each individual mass, which saves time.
+   master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-The decision whether or not to use a conglomerate depends on the accuracy control
-parameter ``opening_angle``. Using the small-angle approximation, a conglomerate
-may be used if its geometric size subtends an angle no greater than the
-``opening_angle`` upon the remote mass. The default value is
-``opening_angle = 1``, which gives errors well under 1%. A value of 
-``opening_angle = 0`` is identical to the full O(N^2) method, and larger values
-will speed up the calculation and sacrifice accuracy (see the figures below).
+As many validators as desired can be added, and a clump is only kept if all 
+return True.  If not, a clump is remerged into its parent.  Custom validators 
+can easily be added.  A validator function need only accept a ``Clump`` object
+and return either True or False.
 
-The treecode method is iterative. Conglomerates may themselves form larger
-conglomerates. And if a larger conglomerate does not meet the ``opening_angle``
-criterion, the smaller conglomerates are tested as well. This iteration of 
-conglomerates will
-cease once the level of the original masses is reached (this is what happens
-for all pair calculations if ``opening_angle = 0``).
+.. code:: python
 
-Below are some examples of how to control the usage of the treecode.
+   def _minimum_gas_mass(clump, min_mass):
+       return (clump["gas", "cell_mass"].sum() >= min_mass)
+   add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-This example will calculate the ratio of the potential energy to kinetic energy
-for a spherical clump using the treecode method with an opening angle of 2.
-The default opening angle is 1.0:
+The ``add_validator`` function adds the validator to a registry that can 
+be accessed by the clump finder.  Then, the validator can be added to the 
+clump finding just like the others.
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  ds = load("DD0000")
-  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities.is_bound(truncate=False, include_thermal_energy=True,
-      treecode=True, opening_angle=2.0)
+.. code:: python
 
-This example will accomplish the same as the above, but will use the full
-N^2 method.
+   master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  ds = load("DD0000")
-  sp = ds.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities.is_bound(truncate=False, include_thermal_energy=True,
-      treecode=False)
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
+and maximum of the contouring field, and the step size.  The lower value of the 
+contour will be continually multiplied by the step size.
 
-Here the treecode method is specified for clump finding (this is default).
-Please see the link above for the full example of how to find clumps (the
-trailing backslash is important!):
+.. code:: python
 
-.. code-block:: python
-  
-  function_name = 'self.data.quantities.is_bound(truncate=True, \
-      include_thermal_energy=True, treecode=True, opening_angle=2.0) > 1.0'
-  master_clump = amods.level_sets.Clump(data_source, None, field,
-      function=function_name)
+   c_min = data_source["gas", "density"].min()
+   c_max = data_source["gas", "density"].max()
+   step = 2.0
+   find_clumps(master_clump, c_min, c_max, step)
 
-To turn off the treecode, of course one should turn treecode=False in the
-example above.
+After the clump finding has finished, the master clump will represent the top 
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+with its own ``children`` attribute, and so on.
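+
+A short sketch of walking this hierarchy (the recursive helper below is
+illustrative, not part of yt):
+
+.. code:: python
+
+   def print_hierarchy(clump, level=0):
+       # Indent each clump by its depth in the tree.
+       print "%s%s" % ("  " * level, clump)
+       # children may be empty (or unset) for leaf clumps.
+       for child in (clump.children or []):
+           print_hierarchy(child, level + 1)
+
+   print_hierarchy(master_clump)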
 
-Treecode Speedup and Accuracy Figures
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+A number of helper routines exist for examining the clump hierarchy.
 
-Two datasets are used to make the three figures below. Each is a zoom-in
-simulation with high resolution in the middle with AMR, and then lower
-resolution static grids on the periphery. In this way they are very similar to
-a clump in a full-AMR simulation, where there are many AMR levels stacked
-around a density peak. One dataset has a total of 3 levels of AMR, and
-the other has 10 levels, but in other ways are very similar.
+.. code:: python
 
-The first figure shows the effect of varying the opening angle on the speed
-and accuracy of the treecode. The tests were performed using the L=10 
-dataset on a clump with approximately 118,000 cells. The speedup of up the
-treecode is in green, and the accuracy in blue, with the opening angle
-on the x-axis.
+   # Write a text file of the full hierarchy.
+   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
-With an ``opening_angle`` = 0, the accuracy is perfect, but the treecode is
-less than half as fast as the brute-force method. However, by an
-``opening_angle`` of 1, the treecode is now nearly twice as fast, with
-about 0.2% error. This trend continues to an ``opening_angle`` 8, where
-large opening angles have no effect due to geometry.
+   # Write a text file of only the leaf nodes.
+   write_clumps(master_clump, 0, "%s_clumps.txt" % ds)
 
-.. image:: _images/TreecodeOpeningAngleBig.png
-   :width: 450
-   :height: 400
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
 
-Note that the accuracy is always below 1. The treecode will always underestimate
-the gravitational binding energy of a clump.
+``Clump`` objects can be used like all other data containers.
 
-In this next figure, the ``opening_angle`` is kept constant at 1, but the
-number of cells is varied on the L=3 dataset by slowly expanding a spherical
-region of analysis. Up to about 100,000 cells,
-the treecode is actually slower than the brute-force method. This is due to
-the fact that with fewer cells, smaller geometric distances,
-and a shallow AMR index, the treecode
-method has very little chance to be applied. The calculation is overall
-slower due to the overhead of the treecode method & startup costs. This
-explanation is further strengthened by the fact that the accuracy of the
-treecode method stay perfect for the first couple thousand cells, indicating
-that the treecode method is not being applied over that range.
+.. code:: python
 
-Once the number of cells gets high enough, and the size of the region becomes
-large enough, the treecode method can work its magic and the treecode method
-becomes advantageous.
+   print leaf_clumps[0]["gas", "density"]
+   print leaf_clumps[0].quantities.total_mass()
 
-.. image:: _images/TreecodeCellsSmall.png
-   :width: 450
-   :height: 400
+The writing functions will write out a series of properties about each
+clump by default.  Additional properties can be appended with the 
+``Clump.add_info_item`` function.
 
-The saving grace to the figure above is that for small clumps, a difference of
-50% in calculation time is on the order of a second or less, which is tiny
-compared to the minutes saved for the larger clumps where the speedup can
-be greater than 3.
+.. code:: python
 
-The final figure is identical to the one above, but for the L=10 dataset.
-Due to the higher number of AMR levels, which translates into more opportunities
-for the treecode method to be applied, the treecode becomes faster than the
-brute-force method at only about 30,000 cells. The accuracy shows a different
-behavior, with a dip and a rise, and overall lower accuracy. However, at all
-times the error is still well under 1%, and the time savings are significant.
+   master_clump.add_info_item("total_cells")
 
-.. image:: _images/TreecodeCellsBig.png
-   :width: 450
-   :height: 400
+Just like the validators, custom info items can be added by defining functions 
+that minimally accept a ``Clump`` object and return a string to be printed.
 
-The figures above show that the treecode method is generally very advantageous,
-and that the error introduced is minimal.
+.. code:: python
+
+   def _mass_weighted_jeans_mass(clump):
+       jeans_mass = clump.data.quantities.weighted_average_quantity(
+           "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+       return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+   add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+Then, add it to the list:
+
+.. code:: python
+
+   master_clump.add_info_item("mass_weighted_jeans_mass")
+
+By default, the following info items are activated: **total_cells**, 
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
+**max_grid_level**, **min_number_density**, **max_number_density**, and 
+**distance_to_main_clump**.
+
+Clumps can be visualized using the ``annotate_clumps`` callback.
+
+.. code:: python
+
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+                           center='c', width=(20,'kpc'))
+   prj.annotate_clumps(leaf_clumps)
+   prj.save('clumps')

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -11,4 +11,5 @@
    halo_transition
    halo_finding
    halo_mass_function
+   halo_merger_tree
    halo_analysis_example

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -1,3 +1,5 @@
+.. _halo-analysis-example:
+
 Using HaloCatalogs to do Analysis
 ---------------------------------
 

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -8,7 +8,7 @@
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
 For a direct translation of various halo analysis tasks using yt-2.x
-to yt-3.0 please see :ref:`halo_transition`.
+to yt-3.0 please see :ref:`halo-transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
 catalog through data_ds. These halos can be found using friends-of-friends,

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -17,7 +17,7 @@
 In order to run this extension on a dataset, the haloes need to be located
 (using HOP, FOF or Parallel HOP, see :ref:`halo_finding`),
 and their virial masses determined using the
-HaloProfiler (see :ref:`halo_profiling`).
+HaloProfiler.
 Please see the step-by-step how-to which puts these steps together
 (:ref:`hmf_howto`).
 If an optional analytical fit is desired, the correct initial

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_merger_tree.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_merger_tree.rst
@@ -0,0 +1,6 @@
+.. _merger_tree:
+
+Halo Merger Tree
+================
+
+.. note:: As of :code:`yt-3.0`, the halo merger tree functionality has been removed to be replaced by machinery that works with the ``HaloCatalog`` object.  In the mean time, this functionality can still be found in :code:`yt-2.x`.

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/halo_transition.rst
--- a/doc/source/analyzing/analysis_modules/halo_transition.rst
+++ b/doc/source/analyzing/analysis_modules/halo_transition.rst
@@ -1,3 +1,4 @@
+.. _halo-transition:
 
 Getting up to Speed with Halo Analysis in yt-3.0
 ================================================
@@ -45,9 +46,10 @@
 Specifically, all quantities can be accessed as shown:
 
 .. code-block:: python
-   from yt.mods import *
+
+   import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.create()
    ad = hc.all_data()
@@ -62,9 +64,10 @@
 is shown below
 
 .. code-block:: python
-   from yt.mods import *
+
+   import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.add_filter("quantity_value", "particle_mass", ">", 1e13, "Msun")
    hc.create()
@@ -76,7 +79,7 @@
 profiling functionality is now completely contained within the
 halo catalog. A complete example of how to profile halos by 
 radius using the new infrastructure is given in 
-:ref:`halo_analysis_example`. 
+:ref:`halo-analysis-example`. 
 
 Plotting Halos
 --------------
@@ -86,14 +89,15 @@
 passed to the annotate halo call rather than a halo list.
 
 .. code-block:: python
-   from yt.mods import *
+
+   import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
 
-   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
    hc.create()
 
-   prj = ProjectionPlot(data_ds, 'z', 'density')
+   prj = yt.ProjectionPlot(data_ds, 'z', 'density')
    prj.annotate_halos(hc)
    prj.save()
 

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ b/doc/source/analyzing/analysis_modules/hmf_howto.rst
@@ -38,7 +38,7 @@
 Halo Profiling
 --------------
 
-The halo profiler (:ref:`halo_profiling`) is a powerful tool that can analyze
+The halo profiler is a powerful tool that can analyze
 haloes in many ways. It is beneficial to read its documentation to become
 familiar with it before using it.
 For this exercise, only the virial mass of each
@@ -58,7 +58,7 @@
 
 This script limits the output to virialized haloes with mass greater than or
 equal to 1e8 solar masses. If you run into problems, try pre-filtering problem
-haloes (:ref:`halo_profiler_pre_filters`).
+haloes.
 
 Halo Mass Function
 ------------------

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/merger_tree.rst
--- a/doc/source/analyzing/analysis_modules/merger_tree.rst
+++ /dev/null
@@ -1,767 +0,0 @@
-.. _merger_tree:
-
-Halo Merger Tree
-================
-
-.. note:: At the moment the merger tree is not yet implemented using new 
-    halo catalog functionality. 
-
-The Halo Merger Tree extension is capable of building a database of halo mergers
-over a set of time-ordered Enzo datasets. The fractional contribution of older
-'parent' halos to younger 'child' halos is calculated by comparing the unique
-index labels of their constituent particles. The data is stored in a
-`SQLite <http://sqlite.org/>`_ database which enables the use of powerful
-and fast SQL queries over all the halos.
-
-General Overview
-----------------
-
-The first requirement is a set of sequential datasets.
-The detail of the merger tree is increased as the difference in
-time between snapshots is reduced, at the cost of higher computational effort
-for the tree itself and and disk usage for the snapshots.
-The merger tree relies on the output of one of the Halo Finders in yt, and the
-user can choose which one to use.
-The merger tree is capable of running the halo finder if it hasn't already
-been done.
-Once halo finding is accomplished for all the data snapshots, the halo
-lineage is calculated by comparing the particle membership of halos between
-pairs of time steps.
-The halo data and tree data is stored in the SQLite database.
-
-Clearly, another requirement is that Python has the
-`sqlite3 library <http://docs.python.org/library/sqlite3.html>`_
-installed.
-This should be built along with everything else yt needs
-if the ``install_script.sh`` was used.
-
-The merger tree can be calculated in parallel, and if necessary, it will run
-the halo finding in parallel as well. Please see the note below about the
-special considerations needed for Network File Systems.
-
-There is a convenience-wrapper for querying the database, called
-``MergerTreeConnect``.
-It simplifies accessing data in the database.
-
-There are two output classes for the merger tree. The ``MergerTreeDotOutput`` class
-outputs the tree for a user-specified subset of halos to a
-`Graphviz format <http://graphviz.org/>`_ file.
-Graphviz is an open-source package for visualizing connected objects in a
-graphical way.
-There are binary distributions for all major operating systems.
-It is also possible to dump the contents of the SQLite database to a simple text file
-with the ``MergerTreeTextOutput`` class.
-The data is saved in columnar format.
-
-Conceptual Primer
-~~~~~~~~~~~~~~~~~
-
-The best way to view the merger tree extension is as a two-part process.
-First, the merger tree is built and stored in the database.
-This process can be quite time consuming, depending on the size of the simulation,
-and the number and size of halos found in the snapshots.
-This is not a process one wants to do very often, and why it is separate
-from the analysis parts.
-
-The second part is actually a many-part process, which is the analysis of the
-merger tree itself.
-The first step is computationally intensive, but the analysis step
-is user-intensive.
-The user needs to decide what to pull out of the merger tree
-and figure out how to extract the needed data with SQL statements.
-Once an analysis pipeline is written, it should run very fast for even
-very large databases.
-
-A Note About Network File Systems
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Accessing a SQLite database stored on a Network (or Distributed) File System (NFS)
-is a risky thing to do, particularly if more than one task wants to write
-at the same time (`see more here <http://www.sqlite.org/lockingv3.html#how_to_corrupt>`_).
-NFS disks can store files on multiple physical hard drives, and it can take time
-for changes made by one task to appear to all the parallel tasks.
-Only one task of the merger tree ever interacts with the database,
-so these dangers are minimal,
-but in general it's a good idea to know something about the disk used to
-store the database.
-
-In general, it is recommended to keep the database on a 'real disk' 
-(/tmp for example, if all the tasks are on the same SMP node,
-or RAM disk for extra speed) if possible,
-but it should work on a NFS disk as well.
-If a temporary disk is used to store the database while it's being built,
-remember to copy the file to a permanent disk after the merger tree script
-is finished.
-
-
-Running and Using the Halo Merger Tree
---------------------------------------
-
-It is very simple to create a merger tree database for a series of snapshots.
-The most difficult part is creating an ordered list of Enzo restart files.
-There are two ways to do it, by hand or with the EnzoSimulation extension.
-
-By Hand
-~~~~~~~
-
-Here is an example of how to build the list and build the database by hand.
-Here, the snapshots are stored in directories named DD????, and the enzo
-restart file named data????, where ???? is a four digit zero-padded integer.
-The final snapshot considered (most progressed in time) is DD0116,
-and the earliest that will be examined is DD0100.
-The database will be saved to ``/path/to/database/halos.db``.
-This example below works identically in serial or in parallel.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  from yt.analysis_modules.halo_finding.api import *
-
-  files = []
-  start = 100
-  finish = 116
-  for i in range(start, finish + 1):
-      files.append('/path/to/snapshots/DD%04d/data%04d' % (i, i))
-
-  MergerTree(restart_files=files, database='/path/to/database/halos.db')
-
-If the halos have not been found previously for the snapshots, the halo finder
-will be run automatically. See the note about this below.
-
-Using EnzoSimulation
-~~~~~~~~~~~~~~~~~~~~
-
-Here is how to build the input list of restart files using the EnzoSimulation
-extension.
-It is possible to set range and interval between snapshots.
-Please see the EnzoSimulation
-documentation (:ref:`analyzing-an-entire-simulation`) for details.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  from yt.analysis_modules.halo_finding.api import *
-  import yt.analysis_modules.simulation_handler.api as ES
-  
-  es = ES.EnzoSimulation('/path/to/snapshots/simulation.par')
-  
-  files = []
-  for output in es.allOutputs:
-      files.append(output['filename'])
-
-  MergerTree(restart_files=files, database='/path/to/database/halos.db')
-
-Merger Tree Parallelism
------------------------
-
-If the halos are to be found during the course of building the merger tree,
-run with an appropriate number of tasks to the size of the dataset and the
-halo finder used.
-The speed of the merger tree itself,
-which compares halo membership in parallel very effectively,
-is almost completely constrained by the read/write times of the SQLite file.
-In tests with the halos pre-located, there is not much speedup beyond two MPI tasks.
-There is no negative effect with running the merger tree with more tasks (which is
-why if halos are to be found by the merger tree, the merger tree should be
-run with as many tasks as that step requires), and indeed if the simulation
-is a large one, running in parallel does provide memory parallelism,
-which is important.
-
-How The Database Is Handled In Analysis Restarts
-------------------------------------------------
-
-The Merger Tree is designed to allow the merger tree database to be built
-incrementally.
-For example, if a simulation is currently being run, the merger
-tree database can be built for the available datasets, and when new ones are
-created, the database extended to include them.
-So if there are going to be
-60 data snapshots total (indexed (0, 1, 2, ..., 59)), and only 50 are saved when the
-tree is first built, the analysis should be done on datasets [0, 49].
-If the last ten become available, re-run the merger tree on datasets [49, 59]
-referencing the same database as before.
-By referencing the same database as before, work does not need to be repeated.
-
-If the merger tree process is interrupted before completion (say, if the 
-jobs walltime is exceeded and the scheduler kills it), just run the exact
-same job again.
-The merger tree will check to see what work has already been completed, and
-resume where it left off.
-
-Additional Parameters
-~~~~~~~~~~~~~~~~~~~~~
-
-When calling ``MergerTree``, there are three parameters that control how the
-halo finder is run, if it needs to be run.
-
-  * ``halo_finder_function`` (name) - Which of the halo finders (:ref:`halo_finding`)
-    to use. Default: ``HaloFinder`` (HOP).
-  * ``halo_finder_threshold`` (float) - When using HOP or Parallel HOP, this sets the
-    threshold used. Default: 80.0.
-  * ``FOF_link_length`` (float) - When using Friends of Friends (FOFHaloFinder), this sets
-    the inter-particle link length used. Default: 0.2.
-  * ``dm_only`` (bool) - Whether to include stars (False), or only the dark
-    matter particles when building halos (True).
-    Default: False.
-  * ``refresh`` (bool) - If set to True, this will run the halo finder and
-    rebuild the database regardless of whether or not the halo files or
-    database exist on disk already.
-    Default: False.
-  * ``index`` (bool) - Whether to add an index to the SQLite file. True makes
-    SQL searches faster at the cost of additional disk space. Default=True.
-
-Example using Parallel HOP:
-
-.. code-block:: python
-
-  MergerTree(restart_files=files, database='/path/to/database/halos.db',
-      halo_finder_function=parallelHF, halo_finder_threshold=100.)
-
-Pre-Computing Halos
-~~~~~~~~~~~~~~~~~~~
-
-If halo finding is to happen before the merger tree is calculated, and the
-work is not to be wasted, special care
-should be taken to ensure that all the data required for the merger tree is
-saved.
-By default, the merger tree looks for files that begin with the name ``MergerHalos``
-in the same directory as each Enzo restart file,
-and if those files are missing or renamed, halo finding will be performed again.
-If ``halos`` is the list of halos returned by the halo finder, these three
-commands should be called to save the needed data:
-
-.. code-block:: python
-
-  halos.write_out('MergerHalos.out')
-  halos.write_particle_lists('MergerHalos')
-  halos.write_particle_lists_txt('MergerHalos')
-
-There is a convenience function that will call the three functions above
-at one time:
-
-.. code-block:: python
-
-  halos.dump('MergerHalos')
-
-Please see the documents on halo finding for more information on what these
-commands do (:ref:`halo_finding`).
-
-Accessing Data in the Database
-------------------------------
-
-SQLite databases support nearly all of the standard SQL queries.
-It is possible to write very complicated and powerful SQL queries, but below
-only simple examples will are shown. Please see other resources (WWW, books) for
-more on how to write SQL queries.
-
-It is possible to read and modify a SQLite database from the command line using
-the ``sqlite3`` command (e.g. ``sqlite3 database.db``). It can be very convenient
-to use this to quickly inspect a database, but is not suitable for extracting or inserting
-large amounts of data. There are many examples (again, see the WWW or books)
-available on how to use the command line ``sqlite3`` command.
-
-The table containing halo data in the database is named 'Halos'.
-All queries for halo data will come from this table.
-The table has these columns:
-
-  #. ``GlobalHaloID`` (int) - A fully-unique identifier for the halo.
-  #. ``SnapCurrentTimeIdentifier`` (int) - An unique time identifier for the snapshot
-     the halo comes from. Equivalent to 'CurrentTimeIdentifier' from the Enzo
-     restart file.
-  #. ``SnapZ`` (float) - The redshift for the halo.
-  #. ``SnapHaloID`` (int) - The halo ID for the halo taken from the output of the
-     halo finder (i.e. 'halos.write_out("HopAnalysis.out")'). It is unique for halos
-     in the same snapshot, but not unique across the full database.
-  #. ``HaloMass`` (float) - The total mass of dark matter in the halo as
-     identified by the halo finder.
-  #. ``NumPart`` (int) - Number of dark matter particles in the halo as identified
-     by the halo finder.
-  #. ``CenMassX``, 
-  #. ``CenMassY``,
-  #. ``CenMassZ`` (float) - The location of the center of mass of the halo in code units.
-  #. ``BulkVelX``,
-  #. ``BulkVelY``,
-  #. ``BulkVelZ`` (float) - The velocity of the center of mass of the halo in
-     cgs units.
-  #. ``MaxRad`` (float) - The distance from the center of mass to the most
-     remote particle in the halo in code units.
-  #. ``ChildHaloID0`` (int) - The GlobalHaloID of the child halo which receives
-     the greatest proportion of particles from this halo.
-  #. ``ChildHaloFrac0`` (float) - The fraction by mass of particles from this
-     (parent) halo that goes to the child halo recorded in ChildHaloID0.  If
-     all the particles from this parent halo goes to ChildHaloID0, this number will
-     be 1.0, regardless of the mass of the child halo.
-  #. ``ChildHaloID[1-4]``, ``ChildHaloFrac[1-4]`` (int, float) - Similar to the
-     columns above, these store the second through fifth greatest recipients of
-     particle mass from this parent halo.
-
-.. warning::
-
-   A value of -1 in any of the ``ChildHaloID`` columns corresponds to
-   a fake (placeholder) child halo entry. There is no halo with an ID equal to -1.
-   This is used during the merger tree construction,
-   and must be accounted for when constructing SQL queries of the database.
-
-To get the data for the most massive halo at the end of the simulation,
-there is a convenience class that simplifies database access. Using it, a query
-might look like this:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-
-  mtc = MergerTreeConnect(database='halos.db')
-  line = "SELECT * FROM Halos WHERE SnapZ=0.0 AND SnapHaloID=0;"
-  results = mtc.query(line)
-
-``results`` is a list containing a singular tuple containing the values for that halo in
-the same order as
-given above for the columns.
-
-Another way to get the same information is to use one of the convenience functions.
-The following example shows how to do this:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-
-  mtc = MergerTreeConnect(database='halos.db')
-  this_halo = mtc.get_GlobalHaloID(0, 0.0)
-
-The first term of ``get_GlobalHaloID`` is the ``SnapHaloID`` for the halo of
-interest, and the second is the redshift of interest.
-The results are stored in ``this_halo`` as an integer.
-
-If all that is wanted is a few of the columns, this slight modification below
-will retrieve only the desired data. In general, it is a good idea to retrieve
-only the columns that will actually be used. Requesting all the columns (with
-``*``) requires more reads from disk and slows down the query.
-
-.. code-block:: python
-
-  line = "SELECT NumPart, GlobalHaloID FROM Halos WHERE SnapZ=0.0 AND SnapHaloID=0;"
-  results = mtc.query(line)
-
-``results`` is a list containing a single tuple containing two items, the values for 
-``NumPart`` first and ``GlobalHaloID`` second.
-
-There is also a convenience function that will retrieve all the data columns
-for a given halo.
-The input of the function is the ``GlobalHaloID`` for the
-halo of interest, and it returns a dictionary where the keys are the names
-of the data columns, and the values are the entries in the database.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-
-  mtc = MergerTreeConnect(database='halos.db')
-  info = mtc.get_halo_info(1544)
-  print info
-  {'BulkVelX': -32759799.359999999,
-   'BulkVelY': -28740239.109999999,
-   'BulkVelZ': -20066000.690000001,
-   'CenMassX': 0.23059111360000001,
-   'CenMassY': 0.4061139809,
-   'CenMassZ': 0.80882763749999997,
-   'ChildHaloFrac0': 0.9642857141249418,
-   'ChildHaloFrac1': 0.0,
-   'ChildHaloFrac2': 0.0,
-   'ChildHaloFrac3': 0.0,
-   'ChildHaloFrac4': 0.0,
-   'ChildHaloID0': 1688,
-   'ChildHaloID1': 1712,
-   'ChildHaloID2': 1664,
-   'ChildHaloID3': 1657,
-   'ChildHaloID4': 1634,
-   'GlobalHaloID': 1544,
-   'HaloMass': 20934692770000.0,
-   'MaxRad': 0.01531299899,
-   'NumPart': 196,
-   'SnapCurrentTimeIdentifier': 1275946788,
-   'SnapHaloID': 56,
-   'SnapZ': 0.024169713061444002}
-
-If data from more than one halo is desired, more than one item will be returned.
-This query will find the largest halo from each of the snapshots.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-  line = "SELECT HaloMass,SnapZ FROM Halos WHERE SnapHaloID=0;"
-  results = mtc.query(line)
-
-``results`` is a list of multiple two-tuples.
-Note that SQLite doesn't return the values in any
-particular order. If order is unimportant, leaving the results unsorted saves
-time. But if order is important, you can modify the query to sort the results
-by redshift.
-
-.. code-block:: python
-
-  line = "SELECT HaloMass,SnapZ FROM Halos WHERE SnapHaloID=0 ORDER BY SnapZ DESC;"
-
-Now ``results`` will be ordered by time, first to last, for each two-tuple
-in the list.
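-
-For example, to walk the ordered results and print the mass history of the
-halo (a short sketch):
-
-.. code-block:: python
-
-  # Each tuple is (HaloMass, SnapZ); SnapZ DESC runs from early to late times.
-  for mass, z in results:
-      print z, mass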
-
-The function ``get_halo_parents()`` will return all the halos that are
-identified as parents of the specified halo.
-Due to the way that the halo tree is constructed, it will also return parent
-halos that have zero mass contribution to the specified halo.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-  parents = mtc.get_halo_parents(1688)
-  print parents
-  [[1544, 0.9642857141249418],
-   [1613, 0.0],
-   [1614, 0.0],
-   [1489, 0.0],
-   [1512, 0.0],
-   [1519, 0.0],
-   [1609, 0.0]]
-
-The last example shows the kernel of the most important operation for a
-merger tree: recursion back in time to find the progenitors of a halo. Using a
-query similar to the ones above, the ``GlobalHaloID`` is found for the halo of
-interest at some late point in time (z=0, typically). Using that value (here
-given the arbitrary placeholder value 1234567),
-the halos that came before can be identified very easily:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-
-  # Recursive function on parent halos.
-  def findParent(haloID, lineage):
-      line = "SELECT GlobalHaloID from Halos where ChildHaloID0=%d;" % haloID
-      results = mtc.query(line)
-      if results == []:
-          return lineage
-      # A one-tuple inside a list.
-      parentID = results[0][0]
-      lineage[parentID] = haloID
-      # Now we recurse back in time, returning the accumulated dict.
-      return findParent(parentID, lineage)
-
-  # Stores the parent->child relationships.
-  lineage = {}
-  # Call the function once with the late halo.
-  lineage = findParent(1234567, lineage)
-
-Contained within the dict ``lineage`` is the primary lineage for the final
-chosen halo. Storing the family tree in this way may not be the best choice,
-but this makes it clear how easy it is to build up the history of a halo
-over time.
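-
-As a quick sketch of consuming the dict, each stored parent->child link can
-be printed directly:
-
-.. code-block:: python
-
-  for parentID, childID in lineage.items():
-      print parentID, '->', childID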
-
-Merger Tree Convenience Functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Below are some examples of the convenience functions available.
-
-**get_GlobalHaloID(SnapHaloID, z)**. Returns the GlobalHaloID for the
-given halo.::
-
-    Parameters
-    ----------
-    SnapHaloID : Integer
-        The index label for the halo of interest, equivalent to
-        the first column of the halo finder text output file.
-    z : Float
-        The redshift for the halo of interest. The value returned will be
-        for the halo with SnapHaloID equal to ID (above) with redshift
-        closest to this value.
-    
-    Examples
-    --------
-    >>> this_halo = mtc.get_GlobalHaloID(0, 0.)
-
-**get_halo_parents(GlobalHaloID)**. Returns a list of the parent halos to the
-given halo, along with the contribution fractions from parent to child.
-This function returns a list of lists, where each entry in the top list
-is [GlobalHaloID, ChildHaloFrac] of the parent halo in relationship
-to the given child halo.::
-        
-    Parameters
-    ----------
-    GlobalHaloID : Integer
-        The GlobalHaloID of the halo of interest.
-    
-    Examples
-    --------
-    >>> parents = mtc.get_halo_parents(1688)
-    >>> print parents
-    [[1544, 0.9642857141249418],
-     [1613, 0.0],
-     [1614, 0.0],
-     [1489, 0.0],
-     [1512, 0.0],
-     [1519, 0.0],
-     [1609, 0.0]]
-
-**get_direct_parent(GlobalHaloID)**. Returns the GlobalHaloID of the direct
-parent of the given halo.
-This is accomplished by identifying the most massive parent halo
-that contributes at least 50% of its mass to the given halo.::
-        
-    Parameters
-    ----------
-    GlobalHaloID : Integer
-        The GlobalHaloID of the halo of interest.
-    
-    Examples
-    --------
-    >>> parent = mtc.get_direct_parent(1688)
-    >>> print parent
-    1544
-
-**get_halo_info(GlobalHaloID)**. Returns all available information for
-the given GlobalHaloID in the form of a dict.::
-        
-    Parameters
-    ----------
-    GlobalHaloID : Integer
-        The unique index for the halo of interest.
-    
-    Examples
-    --------
-    >>> info = mtc.get_halo_info(1544)
-    >>> print info
-    {'BulkVelX': -32759799.359999999,
-     'BulkVelY': -28740239.109999999,
-     'BulkVelZ': -20066000.690000001,
-     'CenMassX': 0.23059111360000001,
-     'CenMassY': 0.4061139809,
-     'CenMassZ': 0.80882763749999997,
-     'ChildHaloFrac0': 0.9642857141249418,
-     'ChildHaloFrac1': 0.0,
-     'ChildHaloFrac2': 0.0,
-     'ChildHaloFrac3': 0.0,
-     'ChildHaloFrac4': 0.0,
-     'ChildHaloID0': 1688,
-     'ChildHaloID1': 1712,
-     'ChildHaloID2': 1664,
-     'ChildHaloID3': 1657,
-     'ChildHaloID4': 1634,
-     'GlobalHaloID': 1544,
-     'HaloMass': 20934692770000.0,
-     'MaxRad': 0.01531299899,
-     'NumPart': 196,
-     'SnapCurrentTimeIdentifier': 1275946788,
-     'SnapHaloID': 56,
-     'SnapZ': 0.024169713061444002}
-
-
-Merger Tree Output
-------------------
-
-There are two included methods for outputting the contents of a Merger Tree
-database: Graphviz and plain-text columnar format.
-
-Graphviz Output
-~~~~~~~~~~~~~~~
-
-The `Graphviz <http://graphviz.org/>`_ output function can write the merger
-tree to a text file for later parsing by the Graphviz executable ``dot``,
-or it can create an image directly.
-The ``dot`` layout engine produces hierarchical diagrams in which
-directionality (such as left to right or top to bottom)
-indicates some meaningful property;
-in the case of the merger tree, top to bottom indicates the progress of
-time.
-Graphviz can output the visualization in a wide range of image and vector
-formats suitable for any application.
-
-Below is a simple example of the Graphviz/dot visualization.
-Each box contains the mass of the halo (in Msun), and the center of mass
-for the halo in simulation units.
-For each snapshot, the box for the largest halo is colored red.
-The numbers next to the link arrows give the percentage of the parent
-halo's mass that goes to the child.
-On each row, the un-linked black boxes
-contain the redshift for that snapshot.
-
-.. image:: _images/merger_tree_ex.png
-   :width: 400
-   :height: 438
-
-To output the merger tree for a set of halos, the chosen halos need to be
-identified. There are two choices: either the ``GlobalHaloID``, or
-the ``SnapHaloID`` along with the ``SnapCurrentTimeIdentifier`` value
-for the chosen halo(s), may be used.
-Two pieces of information are needed if ``GlobalHaloID`` is not specified
-because ``SnapHaloID`` is not a unique identifier in the database.
-The reason ``SnapCurrentTimeIdentifier`` is used rather than ``SnapZ`` has
-to do with the floating-point representation of the redshift column and the
-way SQL equality comparisons work.
-If ``SnapZ`` were used, the precise float value of the desired redshift would
-have to be supplied, rather than the simpler-to-get-correct integer value of
-``SnapCurrentTimeIdentifier``.
-
-Luckily it isn't as hard as it sounds to get the ``GlobalHaloID`` for the
-desired halo(s).
-By using the ``MergerTreeConnect`` class, it is simple to pick out halos
-before creating the Graphviz output.
-Below, the ``GlobalHaloID`` for the most massive halo in the last (z~0, typically)
-snapshot is found:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-  
-  line = "SELECT max(GlobalHaloID) FROM Halos WHERE SnapHaloID=0;"
-  results = mtc.query(line)
-  print results
-
-Because the database is built up from early times to late, the most
-massive halo at z~0 will have the largest ``GlobalHaloID`` of all halos with
-``SnapHaloID`` equal to 0. ``results`` will be a list containing a one-tuple
-with the desired ``GlobalHaloID``.
-
-Alternatively, it may be easier to use one of the convenience functions:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-  
-  thisHalo = mtc.get_GlobalHaloID(0, 0.0)
-
-``thisHalo`` will be an integer giving the GlobalHaloID for the most massive
-halo (ID=0) at z=0.0.
-
-To output the merger tree for the five largest halos in the last snapshot,
-it may be simplest to find the ``SnapCurrentTimeIdentifier`` for that
-snapshot.
-This can either be done by referencing the dataset itself by hand
-(look for ``CurrentTimeIdentifier`` in the Enzo restart file), or by querying
-the database.
-Here is how to query the database for the right information:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  mtc = MergerTreeConnect(database='halos.db')
-  
-  line = "SELECT max(GlobalHaloID) FROM Halos WHERE SnapHaloID=0;"
-  results = mtc.query(line)
-  
-  line = "SELECT SnapCurrentTimeIdentifier FROM Halos WHERE GlobalHaloID=%d;" % results[0][0]
-  results = mtc.query(line)
-  print results
-
-``results`` is a list containing a one-tuple with the desired
-``SnapCurrentTimeIdentifier``.
-Supposing that the desired ``SnapCurrentTimeIdentifier`` is 72084721, outputting
-merger trees is now simple:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  MergerTreeDotOutput(halos=[0,1,2,3,4], database='halos.db',
-      dotfile='MergerTree.gv', current_time=72084721)
-
-This will output the file ``MergerTree.gv``, which can be parsed by Graphviz.
-To output to an image format directly, name the file appropriately
-(e.g. ``MergerTree.png``).
-A list of available Graphviz image formats can be found by invoking
-(from the command line) ``dot -v``.
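-
-For reference, a standard Graphviz invocation (independent of yt) that turns
-the text file into an image looks like this:
-
-.. code-block:: bash
-
-  dot -Tpng MergerTree.gv -o MergerTree.png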
-
-If the ``GlobalHaloID`` values are known for all of the desired halos,
-``current_time`` should not be specified, as below:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  MergerTreeDotOutput(halos=[24212,5822,19822,10423,51324], database='halos.db',
-      dotfile='MergerTree.gv', link_min=0.7)
-
-The ``link_min`` parameter above limits the tree to following links between
-parent and child halos for which at least 70% of the parent halo's mass goes
-to the child. The default is 0.2.
-
-In this slightly modified example below, if ``dot`` is installed in the
-``PATH``, an image file will be created without an intermediate text file:
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  MergerTreeDotOutput(halos=[24212,5822,19822,10423,51324], database='halos.db',
-      dotfile='MergerTree.png', link_min=0.7)
-
-
-Plain-Text Output
-~~~~~~~~~~~~~~~~~
-
-This is how to output the entire contents of the database to a text file:
-
-.. code-block:: python
-
-  from yt.analysis_modules.halo_merger_tree.api import *
-  
-  MergerTreeTextOutput(database='halos.db', outfile='MergerTreeDB.txt')
-
-Putting it All Together
------------------------
-
-Here is an example of how to create a merger tree for the most massive halo
-in the final snapshot from start to finish, and output the Graphviz
-visualization as a PDF file.
-This will work in serial and in parallel.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_merger_tree.api import *
-  from yt.analysis_modules.halo_finding.api import *
-
-  # Pick our snapshots to use.
-  files = []
-  start = 100
-  finish = 116
-  for i in range(start, finish + 1):
-      files.append('/path/to/snapshots/DD%04d/data%04d' % (i, i))
-
-  my_database = '/path/to/database/halos.db'
-
-  # Build the tree.
-  MergerTree(restart_files=files, database=my_database)
-  
-  # Get the GlobalHaloID for the halo.
-  mtc = MergerTreeConnect(database=my_database)
-  my_halo = mtc.get_GlobalHaloID(0, 0.0)
-  
-  # Output the tree as a PDF file.
-  MergerTreeDotOutput(halos=[my_halo], database=my_database, link_min=0.5,
-      dotfile='MergerTree.pdf')
-
-
-  

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/photon_simulator.rst
--- a/doc/source/analyzing/analysis_modules/photon_simulator.rst
+++ b/doc/source/analyzing/analysis_modules/photon_simulator.rst
@@ -35,6 +35,11 @@
 We'll demonstrate the functionality on a realistic dataset of a galaxy
 cluster to get you started.
 
+.. note::
+
+  Currently, the ``photon_simulator`` analysis module only works with grid-based
+  data.
+  
 Creating an X-ray observation of a dataset on disk
 ++++++++++++++++++++++++++++++++++++++++++++++++++
 

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/radial_column_density.rst
--- a/doc/source/analyzing/analysis_modules/radial_column_density.rst
+++ b/doc/source/analyzing/analysis_modules/radial_column_density.rst
@@ -5,6 +5,14 @@
 .. sectionauthor:: Stephen Skory <s at skory.us>
 .. versionadded:: 2.3
 
+.. note:: 
+
+    As of :code:`yt-3.0`, the radial column density analysis module is not
+    currently functional.  This functionality is still available in
+    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
+    help is needed to port them over.  Contact the yt-users mailing list if you
+    are interested in doing this.
+
 This module allows the calculation of column densities around a point over a
 field such as ``NumberDensity`` or ``Density``.
 This uses :ref:`healpix_volume_rendering` to interpolate column densities

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/radmc3d_export.rst
--- a/doc/source/analyzing/analysis_modules/radmc3d_export.rst
+++ b/doc/source/analyzing/analysis_modules/radmc3d_export.rst
@@ -6,6 +6,14 @@
 .. sectionauthor:: Andrew Myers <atmyers2 at gmail.com>
 .. versionadded:: 2.6
 
+.. note:: 
+
+    As of :code:`yt-3.0`, the RADMC-3D exporter is not
+    currently functional.  This functionality is still available in
+    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
+    help is needed to port them over.  Contact the yt-users mailing list if you
+    are interested in doing this.
+
 `RADMC-3D
 <http://www.ita.uni-heidelberg.de/~dullemond/software/radmc-3d/>`_ is a three-dimensional Monte-Carlo radiative transfer code
 that is capable of handling both line and continuum emission. The :class:`~yt.analysis_modules.radmc3d_export.RadMC3DInterface.RadMC3DWriter`

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/star_analysis.rst
--- a/doc/source/analyzing/analysis_modules/star_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/star_analysis.rst
@@ -5,6 +5,14 @@
 .. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
 .. versionadded:: 1.6
 
+.. note:: 
+
+    As of :code:`yt-3.0`, the star particle analysis module is not currently
+    functional.  This functionality is still available in :code:`yt-2.x`.  If
+    you would like to use these features in :code:`yt-3.x`, help is needed to
+    port them over.  Contact the yt-users mailing list if you are interested in
+    doing this.
+
 This document describes tools in yt for analyzing star particles.
 The Star Formation Rate tool bins stars by time to produce star formation
 statistics over several metrics.

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/sunrise_export.rst
--- a/doc/source/analyzing/analysis_modules/sunrise_export.rst
+++ b/doc/source/analyzing/analysis_modules/sunrise_export.rst
@@ -6,6 +6,13 @@
 .. sectionauthor:: Christopher Moody <cemoody at ucsc.edu>
 .. versionadded:: 1.8
 
+.. note:: 
+
+    As of :code:`yt-3.0`, the sunrise exporter is not currently functional.
+    This functionality is still available in :code:`yt-2.x`.  If you would like
+    to use these features in :code:`yt-3.x`, help is needed to port them over.
+    Contact the yt-users mailing list if you are interested in doing this.
+
 The yt-Sunrise exporter essentially takes grid cell data and translates it into a binary octree format, attaches star particles, and saves the output to a FITS file Sunrise can read. For every cell, the gas mass, metals mass (a fraction of which is later assumed to be in the form of dust), and the temperature are saved. Star particles are defined entirely by their mass, position, metallicity, and a 'radius.' This guide outlines the steps to exporting the data, troubleshoots common problems, and reviews recommended sanity checks. 
 
 Simple Export

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -16,6 +16,5 @@
    star_analysis
    xray_emission_fields
    sunyaev_zeldovich
-   radial_column_density
    photon_simulator
    ppv_cubes

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/analysis_modules/two_point_functions.rst
--- a/doc/source/analyzing/analysis_modules/two_point_functions.rst
+++ b/doc/source/analyzing/analysis_modules/two_point_functions.rst
@@ -5,6 +5,14 @@
 .. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
 .. versionadded:: 1.7
 
+.. note:: 
+
+    As of :code:`yt-3.0`, the two point function analysis module is not
+    currently functional.  This functionality is still available in
+    :code:`yt-2.x`.  If you would like to use these features in :code:`yt-3.x`,
+    help is needed to port them over.  Contact the yt-users mailing list if you
+    are interested in doing this.
+
 The Two Point Functions framework (TPF) is capable of running several
 multi-dimensional two point functions simultaneously on a dataset using
 memory and workload parallelism.
@@ -19,8 +27,7 @@
 
 The TPF relies on the Fortran kD-tree that is used
 by the parallel HOP halo finder. The kD-tree is not built by default with yt
-so it must be built by hand. Please follow the instructions on how to build it,
-see :ref:`fkd_setup`.
+so it must be built by hand.
 
 Quick Example
 -------------

diff -r e3e7b69f36e45149c08526ae54e80dd073769a44 -r fa87d397f01c132c3d126b5dff0b7af9f3940672 doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ b/doc/source/analyzing/creating_derived_fields.rst
@@ -11,7 +11,7 @@
 
 So once a new field has been conceived of, the best way to create it is to
 construct a function that performs an array operation -- operating on a 
-collection of data, neutral to its size, shape, and type.  (All fields should
+collection of data, neutral to its size, shape, and type. (All fields should
 be provided as 64-bit floats.)
 
 A simple example of this is the pressure field, which demonstrates the ease of
@@ -19,11 +19,13 @@
 
 .. code-block:: python
 
-   def _Pressure(field, data):
+   import yt
+
+   def _pressure(field, data):
        return (data.ds.gamma - 1.0) * \
               data["density"] * data["thermal_energy"]
 
-Note that we do a couple different things here.  We access the "Gamma"
+Note that we do a couple different things here.  We access the "gamma"
 parameter from the dataset, we access the "density" field and we access
 the "thermal_energy" field.  "thermal_energy" is, in fact, another derived field!
 ("thermal_energy" deals with the distinction in storage of energy between dual
@@ -37,247 +39,125 @@
 
 .. code-block:: python
 
-   add_field("pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
 
 We feed it the name of the field, the name of the function, and the
-units.  Note that the units parameter is a "raw" string, with some
-LaTeX-style formatting -- Matplotlib actually has a MathText rendering
-engine, so if you include LaTeX it will be rendered appropriately.
+units.  Note that the units parameter is a "raw" string, in the format that ``yt`` uses
+in its :ref:`symbolic units implementation <units>` (e.g., employing only unit names, numbers,
+and mathematical operators in the string, and using ``"**"`` for exponentiation). We suggest
+that you name the function that creates a derived field with the intended field name prefixed
+by a single underscore, as in the ``_pressure`` example above.
 
-.. One very important thing to note about the call to ``add_field`` is
-.. that it **does not** need to specify the function name **if** the
-.. function is the name of the field prefixed with an underscore.  If it
-.. is not -- and it won't be for fields in different units (such as
-.. "cell_mass") -- then you need to specify it with the argument
-.. ``function``.
+:func:`add_field` can be invoked in two other ways. The first is by the function
+decorator :func:`derived_field`. The following code is equivalent to the previous
+example:
 
-We suggest that you name the function that creates a derived field
-with the intended field name prefixed by a single underscore, as in
-the ``_Pressure`` example above.
+.. code-block:: python
+
+   from yt import derived_field
+
+   @derived_field(name="pressure", units="dyne/cm**2")
+   def _pressure(field, data):
+       return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+
+The :func:`derived_field` decorator takes the same arguments as :func:`add_field`,
+and is often a more convenient shorthand in cases where you want to quickly set up
+a new field.
+
+Defining derived fields in the above fashion must be done before a dataset is loaded,
+in order for the dataset to recognize them. If you want to set up a derived field after you
+have loaded a dataset, or if you only want to set up a derived field for a particular
+dataset, there is an :meth:`add_field` method that hangs off dataset objects. The calling
+syntax is the same:
+
+.. code-block:: python
+
+   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
+   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
 
 If you find yourself using the same custom-defined fields over and over, you
 should put them in your plugins file as described in :ref:`plugin-file`.
 
-.. _conversion-factors:
+A More Complicated Example
+--------------------------
 
-Conversion Factors
-~~~~~~~~~~~~~~~~~~
-
-When creating a derived field, ``yt`` does not by default do unit
-conversion.  All of the fields fed into the field are pre-supposed to
-be in CGS.  If the field does not need any constants applied after
-that, you are done. If it does, you should define a second function
-that applies the proper multiple in order to return the desired units
-and use the argument ``convert_function`` to ``add_field`` to point to
-it.  
-
-The argument that you pass to ``convert_function`` will be dependent on 
-what fields are input into your derived field, and in what form they
-are passed from their native format.  For enzo fields, nearly all the
-native on-disk fields are in CGS units already (except for ``dx``, ``dy``,
-and ``dz`` fields), so you typically only need to convert for 
-off-standard fields taking into account where those fields are 
-used in the final output derived field.  For other codes, it can vary.
-
-You can check to see the units associated with any field in a dataset
-from any code by using the ``_units`` attribute.  Here is an example 
-with one of our sample FLASH datasets available publicly at 
-http://yt-project.org/data :
+But what if we want to do something a bit more fancy?  Here's an example of getting
+parameters from the data object and using those to define the field;
+specifically, here we obtain the ``center`` and ``bulk_velocity`` parameters
+and use those to define a field for radial velocity (there is already a ``"radial_velocity"``
+field in ``yt``, but we create this one here just as a transparent and simple example).
 
 .. code-block:: python
 
-   >>> from yt.mods import *
-   >>> ds = load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   >>> ds.field_list
-   ['dens', 'temp', 'pres', 'gpot', 'divb', 'velx', 'vely', 'velz', 'magx', 'magy', 'magz', 'magp']
-   >>> ds.field_info['dens']._units
-   '\\rm{g}/\\rm{cm}^{3}'
-   >>> ds.field_info['temp']._units
-   '\\rm{K}'
-   >>> ds.field_info['velx']._units
-   '\\rm{cm}/\\rm{s}'
+   import yt
+   from yt.fields.api import ValidateParameter
+   import numpy as np
 
-Thus if you were using any of these fields as input to your derived field, you 
-wouldn't have to worry about unit conversion because they're already in CGS.
+   def _my_radial_velocity(field, data):
+       if data.has_field_parameter("bulk_velocity"):
+           bv = data.get_field_parameter("bulk_velocity").in_units("cm/s")
+       else:
+           bv = data.ds.arr(np.zeros(3), "cm/s")
+       xv = data["gas","velocity_x"] - bv[0]
+       yv = data["gas","velocity_y"] - bv[1]
+       zv = data["gas","velocity_z"] - bv[2]
+       center = data.get_field_parameter('center')
+       x_hat = data["x"] - center[0]
+       y_hat = data["y"] - center[1]
+       z_hat = data["z"] - center[2]
+       r = np.sqrt(x_hat*x_hat+y_hat*y_hat+z_hat*z_hat)
+       x_hat /= r
+       y_hat /= r
+       z_hat /= r
+       return xv*x_hat + yv*y_hat + zv*z_hat
+   yt.add_field("my_radial_velocity",
+                function=_my_radial_velocity,
+                units="cm/s",
+                take_log=False,
+                validators=[ValidateParameter('center'),
+                            ValidateParameter('bulk_velocity')])
 
-Some More Complicated Examples
-------------------------------
-
-But what if we want to do some more fancy stuff?  Here's an example of getting
-parameters from the data object and using those to define the field;
-specifically, here we obtain the ``center`` and ``height_vector`` parameters
-and use those to define an angle of declination of a point with respect to a
-disk.
+Note that we have added a few parameters below the main function; we specify
+that we do not wish to display this field as logged, and that we require both
+``bulk_velocity`` and ``center`` to be present in a given data object we wish
+to calculate this for. The latter requirement is expressed through the parameter
+*validators*, which accepts a list of :class:`FieldValidator` objects. These
+objects define the way in which the field is generated, and when it is able to
+be created. In this case, we mandate that parameters *center* and
+*bulk_velocity* are set before creating the field. These are set via
+:meth:`~yt.data_objects.data_containers.set_field_parameter`, which can 
+be called on any object that has fields:
 
 .. code-block:: python
 
-   def _DiskAngle(field, data):
-       # We make both r_vec and h_vec into unit vectors
-       center = data.get_field_parameter("center")
-       r_vec = np.array([data["x"] - center[0],
-                         data["y"] - center[1],
-                         data["z"] - center[2]])
-       r_vec = r_vec/np.sqrt((r_vec**2.0).sum(axis=0))
-       h_vec = np.array(data.get_field_parameter("height_vector"))
-       dp = r_vec[0,:] * h_vec[0] \
-          + r_vec[1,:] * h_vec[1] \
-          + r_vec[2,:] * h_vec[2]
-       return np.arccos(dp)
-   add_field("DiskAngle", take_log=False,
-             validators=[ValidateParameter("height_vector"),
-                         ValidateParameter("center")],
-             display_field=False)
+   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
+   sp = ds.sphere("max", (200.,"kpc"))
+   sp.set_field_parameter("bulk_velocity", yt.YTArray([-100.,200.,300.], "km/s"))
 
-Note that we have added a few parameters below the main function; we specify
-that we do not wish to display this field as logged, that we require both
-``height_vector`` and ``center`` to be present in a given data object we wish
-to calculate this for, and we say that it should not be displayed in a
-drop-down box of fields to display.  This is done through the parameter
-*validators*, which accepts a list of :class:`FieldValidator` objects.  These
-objects define the way in which the field is generated, and when it is able to
-be created.  In this case, we mandate that parameters *center* and
-*height_vector* are set before creating the field.  These are set via 
-:meth:`~yt.data_objects.data_containers.set_field_parameter`, which can 
-be called on any object that has fields.
+In this case, we already know what the *center* of the sphere is, so we do not set it. Also,
+note that *center* and *bulk_velocity* need to be :class:`YTArray` objects with units.
 
-We can also define vector fields.
-
-.. code-block:: python
-
-   def _SpecificAngularMomentum(field, data):
-       if data.has_field_parameter("bulk_velocity"):
-           bv = data.get_field_parameter("bulk_velocity")
-       else: bv = np.zeros(3, dtype='float64')
-       xv = data["velocity_x"] - bv[0]
-       yv = data["velocity_y"] - bv[1]
-       zv = data["velocity_z"] - bv[2]
-       center = data.get_field_parameter('center')
-       coords = np.array([data['x'],data['y'],data['z']], dtype='float64')
-       new_shape = tuple([3] + [1]*(len(coords.shape)-1))
-       r_vec = coords - np.reshape(center,new_shape)
-       v_vec = np.array([xv,yv,zv], dtype='float64')
-       return np.cross(r_vec, v_vec, axis=0)
-   def _convertSpecificAngularMomentum(data):
-       return data.convert("cm")
-   add_field("SpecificAngularMomentum",
-             convert_function=_convertSpecificAngularMomentum, vector_field=True,
-             units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
-
-Here we define the SpecificAngularMomentum field, optionally taking a
-``bulk_velocity``, and returning a vector field that needs conversion by the
-function ``_convertSpecificAngularMomentum``.
-
-It is also possible to define fields that depend on spatial derivatives of 
-other fields.  Calculating the derivative for a single grid cell requires 
-information about neighboring grid cells.  Therefore, properly calculating 
-a derivative for a cell on the edge of the grid will require cell values from 
-neighboring grids.  Below is an example of a field that is the divergence of the 
-velocity.
-
-.. code-block:: python
-
-    def _DivV(field, data):
-        # We need to set up stencils
-        if data.ds["HydroMethod"] == 2:
-            sl_left = slice(None,-2,None)
-            sl_right = slice(1,-1,None)
-            div_fac = 1.0
-        else:
-            sl_left = slice(None,-2,None)
-            sl_right = slice(2,None,None)
-            div_fac = 2.0
-        ds = div_fac * data['dx'].flat[0]
-        f  = data["velocity_x"][sl_right,1:-1,1:-1]/ds
-        f -= data["velocity_x"][sl_left ,1:-1,1:-1]/ds
-        if data.ds.dimensionality > 1:
-            ds = div_fac * data['dy'].flat[0]
-            f += data["velocity_y"][1:-1,sl_right,1:-1]/ds
-            f -= data["velocity_y"][1:-1,sl_left ,1:-1]/ds
-        if data.ds.dimensionality > 2:
-            ds = div_fac * data['dz'].flat[0]
-            f += data["velocity_z"][1:-1,1:-1,sl_right]/ds
-            f -= data["velocity_z"][1:-1,1:-1,sl_left ]/ds
-        new_field = np.zeros(data["velocity_x"].shape, dtype='float64')
-        new_field[1:-1,1:-1,1:-1] = f
-        return new_field
-    def _convertDivV(data):
-        return data.convert("cm")**-1.0
-    add_field("DivV", function=_DivV,
-               validators=[ValidateSpatial(ghost_zones=1,
-	                   fields=["velocity_x","velocity_y","velocity_z"])],
-              units=r"\rm{s}^{-1}", take_log=False,
-              convert_function=_convertDivV)
-
-Note that *slice* is simply a native Python object used for taking slices of 
-arrays or lists.  Another :class:`FieldValidator` object, ``ValidateSpatial`` 
-is given in the list of *validators* in the call to ``add_field`` with 
-*ghost_zones* = 1, specifying that the original grid be padded with one additional 
-cell from the neighboring grids on all sides.  The *fields* keyword simply 
-mandates that the listed fields be present.  With one ghost zone added to all sides 
-of the grid, the data fields (data["velocity_x"], data["velocity_y"], and 
-data["velocity_z"]) will have a shape of (NX+2, NY+2, NZ+2) inside of this function, 
-where the original grid has dimension (NX, NY, NZ).  However, when the final field 
-data is returned, the ghost zones will be removed and the shape will again be 
-(NX, NY, NZ).
+Other examples for creating derived fields can be found in the cookbook recipes
+:ref:`cookbook-simple-derived-fields` and :ref:`cookbook-complex-derived-fields`.
 
 .. _derived-field-options:
 
-Saving Derived Fields
----------------------
-
-Complex fields can be time-consuming to generate, especially on large datasets.
-To mitigate this, ``yt`` provides a mechanism for saving fields to a backup file
-using the Grid Data Format. The next time you start yt, it will check this file
-and your field will be treated as native if present. 
-
-The code below creates a new derived field called "Entr" and saves it to disk:
-
-.. code-block:: python
-
-    from yt.mods import *
-    from yt.utilities.grid_data_format import writer
-
-    def _Entropy(field, data) :
-        return data["temperature"]*data["density"]**(-2./3.)
-    add_field("Entr", function=_Entropy)
-
-    ds = load('GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100')
-    writer.save_field(ds, "Entr")
-
-This creates a "_backup.gdf" file next to your datadump. If you load up the dataset again:
-
-.. code-block:: python
-
-    from yt.mods import *
-
-    ds = load('GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100')
-    data = ds.all_data()
-    print data["Entr"]
-
-you can work with the field exactly as before, without having to recompute it.
-
 Field Options
 -------------
 
-The arguments to :func:`add_field` are passed on to the constructor of
-:class:`DerivedField`.  :func:`add_field` takes care of finding the arguments
-`function` and `convert_function` if it can, however.  There are a number of
-options available, but the only mandatory ones are ``name`` and possibly
-``function``.
+The arguments to :func:`add_field` are passed on to the constructor of :class:`DerivedField`.
+There are a number of options available, but the only mandatory ones are ``name``,
+``units``, and ``function``.
 
    ``name``
      This is the name of the field -- how you refer to it.  For instance,
-     ``Pressure`` or ``H2I_Fraction``.
+     ``pressure`` or ``magnetic_field_strength``.
    ``function``
      This is a function handle that defines the field
-   ``convert_function``
-     This is the function that converts the field to CGS.  All inputs to this
-     function are mandated to already *be* in CGS.
    ``units``
-     This is a mathtext (LaTeX-like) string that describes the units.
-   ``projected_units``
-     This is a mathtext (LaTeX-like) string that describes the units if the
-     field has been projected without a weighting.
+     This is a string that describes the units. Powers must be in
+     Python syntax (``**`` instead of ``^``).
    ``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
@@ -289,43 +169,14 @@
    ``validators``
      (*Advanced*) This is a list of :class:`FieldValidator` objects, for instance to mandate
      spatial data.
-   ``vector_field``
-     (*Advanced*) Is this field more than one value per cell?
    ``display_field``
      (*Advanced*) Should this field appear in the dropdown box in Reason?
    ``not_in_all``
      (*Advanced*) If this is *True*, the field may not be in all the grids.
-
-How Do Units Work?
-------------------
-
-The best way to understand yt's unit system is to keep in mind that ``yt`` is really
-handling *two* unit systems: the internal unit system of the dataset and the
-physical (usually CGS) unit system.  For simulation codes like FLASH and ORION
-that do all computations in CGS units internally, these two unit systems are the
-same.  Most other codes do their calculations in a non-dimensionalized unit
-system chosen so that most primitive variables are as close to unity as
-possible.  ``yt`` allows data access both in code units and physical units by
-providing a set of standard yt fields defined by all frontends.
-
-When a dataset is loaded, ``yt`` reads the conversion factors necessary convert the
-data to CGS units from the datafile itself or from a dictionary passed to the
-``load`` command.  Raw on-disk fields are presented to the user via the string
-names used in the dataset.  For a full enumeration of the known field names for
-each of the different frontends, see the :ref:`field-list`. In general, no
-conversion factors are applied to on-disk fields.
-
-To access data in physical CGS units, yt recognizes a number of 'universal'
-field names.  All primitive fields (density, pressure, magnetic field strength,
-etc.) are mapped to Enzo field names, listed in the :ref:`enzo-field-names`.
-The reason Enzo field names are used here is because ``yt`` was originally written
-to only read Enzo data.  In the future we will switch to a new system of
-universal field names - this will also make it much easier to access raw on-disk
-Enzo data!
-
-In addition to primitive fields, yt provides an extensive list of "universal"
-derived fields that are accessible from any of the frontends.  For a full
-listing of the universal derived fields, see :ref:`universal-field-list`.
+   ``output_units``
+     (*Advanced*) For fields that exist on disk, which we may want to convert to other
+     fields or that get aliased to themselves, we can specify a different
+     desired output unit than the unit found on disk.
 
 Units for Cosmological Datasets
 -------------------------------
@@ -333,14 +184,13 @@
 ``yt`` has additional capabilities to handle the comoving coordinate system used
 internally in cosmological simulations. In simulations that use comoving
 coordinates, all length units have three other counterparts corresponding to
-comoving units, scaled comoving units, and scaled proper units.  In all cases
-'scaled' units refer to scaling by the reduced Hubble constant - i.e. the length
-unit is what it would be in a universe where Hubble's constant is 100 km/s/Mpc.  
+comoving units, scaled comoving units, and scaled proper units. In all cases
+'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
+unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.
 
-To access these different units, yt has a common naming system.  Scaled units
-are denoted by appending ``h`` to the end of the unit name.  Comoving units are
-denoted by appending ``cm`` to the end of the unit name.  If both are used, the
-strings should be appended in that order: 'Mpchcm', *but not* 'Mpccmh'.
+To access these different units, yt has a common naming system. Scaled units are denoted by
+dividing by the scaled Hubble parameter ``h`` (which is in itself a unit). Comoving
+units are denoted by appending ``cm`` to the end of the unit name.
 
 Using the parsec as an example,
 
@@ -350,28 +200,10 @@
 ``pccm``
     Comoving parsecs, :math:`\rm{pc}/(1+z)`.
 
-``pchcm``
+``pccm/h``
     Comoving parsecs normalized by the scaled Hubble parameter, :math:`\rm{pc}/h/(1+z)`.
 
-``pch``
+``pc/h``
     Proper parsecs, normalized by the scaled Hubble parameter, :math:`\rm{pc}/h`.
 
-Which Enzo Field names Does ``yt`` Know About?
-----------------------------------------------
-
-These are the names of primitive fields in the Enzo AMR code.  ``yt`` was originally
-written to analyze Enzo data so the default field names used by the various
-frontends are the same as Enzo fields.
-
-.. note::
-
-   Enzo field names are *universal* yt fields.  All frontends define conversions
-   to Enzo fields.  Enzo fields are always in CGS.
-
-* Density
-* Temperature
-* Gas Energy
-* Total Energy
-* [xyz]-velocity
-* Species fields: HI, HII, Electron, HeI, HeII, HeIII, HM, H2I, H2II, DI, DII, HDI
-* Particle mass, velocity, 
+Further examples of this functionality are shown in :ref:`comoving_units_and_code_units`.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/ca20858d7149/
Changeset:   ca20858d7149
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 17:54:34
Summary:     Adding function "box" to Dataset which wraps to Region class but does not require a ``center``.
Affected #:  1 file

diff -r fa87d397f01c132c3d126b5dff0b7af9f3940672 -r ca20858d71493546ff22fd1ab69685283f7d9231 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -573,11 +573,26 @@
 
     # Now all the object related stuff
     def all_data(self, find_max=False):
+        """
+        all_data is a wrapper to the Region object for creating a region
+        which covers the entire simulation domain.
+        """
         if find_max: c = self.find_max("density")[1]
         else: c = (self.domain_right_edge + self.domain_left_edge)/2.0
         return self.region(c,
             self.domain_left_edge, self.domain_right_edge)
 
+    def box(self, left_edge, right_edge, **kwargs):
+        """
+        box is a wrapper to the Region object for creating a region
+        without having to specify a *center* value.  It assumes the center
+        is the midpoint between the left_edge and right_edge.
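+
+        Examples
+        --------
+        >>> # A sketch; the dataset name is an arbitrary placeholder.
+        >>> ds = yt.load("RedshiftOutput0005")
+        >>> my_box = ds.box([0.25, 0.25, 0.25], [0.75, 0.75, 0.75])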
+        """
+        left_edge = np.array(left_edge)
+        right_edge = np.array(right_edge)
+        c = (left_edge + right_edge)/2.0
+        return self.region(c, left_edge, right_edge, **kwargs)
+
     def _setup_particle_type(self, ptype):
         orig = set(self.field_info.items())
         self.field_info.setup_particle_fields(ptype)


https://bitbucket.org/yt_analysis/yt/commits/c96f92bf9b04/
Changeset:   c96f92bf9b04
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 18:57:08
Summary:     Adding section written by John Zuhone on the different projection styles.
Affected #:  1 file

diff -r ca20858d71493546ff22fd1ab69685283f7d9231 -r c96f92bf9b0418779cacb6858b2c5fffb239ba61 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -224,6 +224,51 @@
 :class:`~yt.visualization.plot_window.ProjectionPlot` for the full
 class description.
 
+.. _projection-types:
+
+Types of Projections
+""""""""""""""""""""
+
+There are several different styles of projections that can be made either
+when creating a projection with ``ds.proj()`` or when making a
+``ProjectionPlot``.  In either construction method, set the ``style`` keyword
+to one of the following (a short example follows the list):
+
+``integrate`` (unweighted)
+    This is the default projection style. It simply integrates the
+    requested field :math:`f(\textbf{x})` along a line of sight :math:`\hat{n}`,
+    given by the axis parameter.  The units of the projected field
+    :math:`g(\textbf{X})` will be the units of the unprojected field :math:`f(\textbf{x})`
+    multiplied by the appropriate length unit, e.g., density in
+    :math:`\mathrm{g\ cm^{-3}}` will be projected to :math:`\mathrm{g\ cm^{-2}}`.
+
+.. math::
+
+    g(\textbf{X}) = {\Big\int\ {f(\textbf{x})\textbf{\hat{n}}\cdot{\mathrm{d\textbf{x}}}}}
+
+``integrate`` (weighted)
+    When using the ``integrate`` style, a ``weight_field`` argument may also
+    be specified, which will produce a weighted projection.  :math:`w(\textbf{x})` 
+    is the field used as a weight. One common example would 
+    be to weight the "temperature" field by the "density" field. In this case, 
+    the units of the projected field are the same as the unprojected field.
+
+.. math::
+
+    g(\textbf{X}) = \frac{\Big\int\ {f(\textbf{x})w(\textbf{x})\textbf{\hat{n}}\cdot{\mathrm{d\textbf{x}}}}}{\Big\int\ {w(\textbf{x})\textbf{\hat{n}}\cdot{\mathrm{d\textbf{x}}}}}
+
+``mip`` 
+    This style picks out the maximum value of a field along the line of 
+    sight given by the axis parameter.
+
+``sum``
+    This style is the same as ``integrate``, except that it does not 
+    multiply by a path length when performing the integration, and is just a 
+    straight summation of the field along the given axis. The units of the 
+    projected field will be the same as those of the unprojected field. This 
+    style is typically only useful for datasets such as 3D FITS cubes where 
+    the third axis of the dataset is something like velocity or frequency.
+
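+For instance, a minimal sketch of requesting a non-default style (the sample
+dataset name here is arbitrary):
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   # "mip" picks out the maximum density along each z line of sight.
+   p = yt.ProjectionPlot(ds, "z", "density", style="mip")
+   p.save()
+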
 .. _off-axis-projections:
 
 Off Axis Projection Plots


https://bitbucket.org/yt_analysis/yt/commits/8c75350f4338/
Changeset:   8c75350f4338
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:13:10
Summary:     Updating fields docs.
Affected #:  1 file

diff -r c96f92bf9b0418779cacb6858b2c5fffb239ba61 -r 8c75350f4338a611e9424820e17d73c448f4ec3c doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -169,6 +169,35 @@
 
 .. include reference here once it's done
 
+The full list of fields available for a dataset can be found in the
+attribute ``field_list`` for native, on-disk fields, and in ``derived_field_list``,
+which is a superset of ``field_list`` and includes all derived fields that can
+be calculated for a given dataset.  You can see these by examining the
+two properties:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print ds.field_list
+   print ds.derived_field_list
+
+When a field is added, it is added to a container that hangs off of the
+dataset, as well.  All of the field creation options
+(:ref:`derived-field-options`) are accessible through this object:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print ds.field_info["pressure"].get_units()
+
+This is a fast way to examine the units of a given field.  Additionally, you
+can use the ``get_source`` method to get the source code for a field:
+
+.. code-block:: python
+
+   field = ds.field_info["pressure"]
+   print field.get_source()
+
 Particle Fields
 ---------------
 


https://bitbucket.org/yt_analysis/yt/commits/df900d59af02/
Changeset:   df900d59af02
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:29:23
Summary:     Correcting some bugs.
Affected #:  2 files

diff -r 8c75350f4338a611e9424820e17d73c448f4ec3c -r df900d59af025a7edb037307ca05c6a04076c874 doc/source/visualizing/streamlines.rst
--- a/doc/source/visualizing/streamlines.rst
+++ b/doc/source/visualizing/streamlines.rst
@@ -2,7 +2,6 @@
 
 Streamlining
 ================
-.. versionadded:: 2.1
 
 Streamlines, as implemented in ``yt``, are defined as being parallel to a
 vector field at all points.  While commonly used to follow the

diff -r 8c75350f4338a611e9424820e17d73c448f4ec3c -r df900d59af025a7edb037307ca05c6a04076c874 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -110,7 +110,7 @@
       the vertex-centered data.
    #. Each transfer function is evaluated at each sample point.  This gives us,
       for each channel, both emission (:math:`j`) and absorption
-      (:math:`alpha`) values.
+      (:math:`\alpha`) values.
    #. The value for the pixel corresponding to the current ray is updated with
       new values calculated by rectangular integration over the path length:
 


https://bitbucket.org/yt_analysis/yt/commits/d2148f257d90/
Changeset:   d2148f257d90
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:29:49
Summary:     Updating the objects docs.
Affected #:  2 files

diff -r df900d59af025a7edb037307ca05c6a04076c874 -r d2148f257d90a527d0fb77118c30669a3cdf7a16 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -75,6 +75,8 @@
     print 'Density of "overpressure and fast" data: ad["density"][overpressure_and_fast] = \n%s' % \
            ad['density'][overpressure_and_fast]
 
+.. _cut-regions:
+
 Cut Regions
 ^^^^^^^^^^^
 

diff -r df900d59af025a7edb037307ca05c6a04076c874 -r d2148f257d90a527d0fb77118c30669a3cdf7a16 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,4 +1,4 @@
-XXX.. _using-objects:
+.. _data-objects:
 
 Data Objects
 ============
@@ -18,10 +18,8 @@
 However, for detailed data inspection as well as hand-crafted derived data,
 objects can be exceptionally useful and even necessary.
 
-For geometric objects, if the shape intersectsXXX
-
-How to Create an Object
------------------------
+How to Create and Use an Object
+-------------------------------
 
 To create an object, you usually only need a loaded dataset, the name of 
 the object type, and the relevant parameters for your object.  Here is a common
@@ -34,13 +32,36 @@
    ad = ds.all_data()
 
 Alternatively, we could create a sphere object of radius 1 kpc on location 
-[0.5, 0.5, 0.5] using the dataset quantity 1 kpc:
+[0.5, 0.5, 0.5]:
 
 .. code-block:: python
 
    import yt
    ds = yt.load("RedshiftOutput0005")
-   sp = ds.sphere([0.5, 0.5, 0.5], ds.quan(1, 'kpc'))
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
+
+After an object has been created, it can be used as a ``data_source`` for certain
+tasks like ``ProjectionPlot`` (see 
+:class:`~yt.visualization.plot_window.ProjectionPlot`), one can compute the 
+bulk quantities associated with that object (see :ref:`derived-quantities`), 
+or the data can be examined directly. For example, if you want to figure out 
+the temperature at all indexed locations in the central sphere of your 
+dataset you could:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
+
+   # Show all temperature values
+   print sp["temperature"]
+
+   # Print things in a more human-friendly manner: one temperature at a time
+   print "(x,  y,  z) Temperature"
+   print "-----------------------"
+   for i in range(sp["temperature"].size):
+       print "(%f,  %f,  %f)    %f" % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i])
 
 .. _available-objects:
 
@@ -53,107 +74,226 @@
 * *Geometric Objects* - Data is selected based on spatial shapes in the dataset
 * *Filtering Objects* - Data is selected based on other field criteria
 * *Collection Objects* - Multiple objects grouped together
-* 
+* *Construction Objects* - Objects that represent some sort of data product
+  constructed by additional analysis
 
 Geometric Objects
 ^^^^^^^^^^^^^^^^^
 
-0D
-""
+For 0D, 1D, and 2D geometric objects, if the extent of the object
+intersects a grid cell, then the cell is included in the object; however,
+for 3D objects the *center* of the cell must be within the object in order
+for the grid cell to be incorporated.
 
-**Point** 
-    Aliased to :class:`~yt.data_objects.data_containers.YTPointBase`    
-    Usage: ``point(coords)``
-    A zero-dimensional point defined by a single cell at specified coordinates.
+0D Objects
+""""""""""
 
-1D
-""
+* **Point** 
+    | Class :class:`~yt.data_objects.data_containers.YTPointBase`    
+    | Usage: ``point(coord, ds=None, field_parameters=None)``
+    | A point defined by a single cell at specified coordinates.
 
-**Axis-Aligned Ray** (aliased to :class:`~yt.data_objects.data_containers.YTOrthoRayBase`)
-    | Usage: ``ortho_ray()``
-    | A one-dimensional line of data cells stretching through the full domain aligned with one of the x,y,z axes.
+1D Objects
+""""""""""
 
-**Arbitrary-Aligned Ray** (aliased to :class:`~yt.data_objects.data_containers.YTRayBase`)
-    | Usage: ``ray()``
-    | A one-dimensional line of data cells stretching through the full domain defined by arbitrary start and end coordinates.
+* **Ray (Axis-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTOrthoRayBase`
+    | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None)``
+    | A line (of data cells) stretching through the full domain 
+      aligned with one of the x,y,z axes.  Defined by an axis and a point
+      to be intersected.
 
-2D 
-""
+* **Ray (Arbitrarily-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTRayBase`
+    | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None)``
+    | A line (of data cells) defined by arbitrary start and end coordinates. 
 
-**Axis-Aligned Slice** (aliased to :class:`~yt.data_objects.data_containers.YTSliceBase`)
-    | Usage: ``slice()``
+2D Objects
+""""""""""
 
-**Arbitrary-Aligned Slice** (aliased to :class:`~yt.data_objects.data_containers.YTCuttingPlaneBase`)
-    | Usage: ``cutting()``
+* **Slice (Axis-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTSliceBase`
+    | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None)``
+    | A plane normal to one of the axes and intersecting a particular 
+      coordinate.
 
-3D
-""
+* **Slice (Arbitrarily-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTCuttingPlaneBase`
+    | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None)``
+    | A plane normal to a specified vector and intersecting a particular 
+      coordinate.
 
-**Disk/Cylinder** (aliased to :class:`~yt.data_objects.data_containers.YTDiskBase`)
-    | Usage: ``disk()``
+3D Objects
+""""""""""
 
-**Box Region** (aliased to :class:`~yt.data_objects.data_containers.YTRegionBase`)
-    | Usage: ``region()``
+* **All Data** 
+    | Method :meth:`~yt.data_objects.static_output.Dataset.all_data`
+    | Usage: ``all_data(find_max=False)``
+    | ``all_data()`` is a wrapper on the Box Region class which defaults to 
+      creating a Region covering the entire dataset domain.
 
-**Sphere** (aliased to :class:`~yt.data_objects.data_containers.YTSphereBase`)
-    | Usage: ``sphere()``
+* **Box Region** 
+    | Class :class:`~yt.data_objects.data_containers.YTRegionBase`
+    | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
+    | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
+    | A box-like region aligned with the grid axis orientation.  It is 
+      defined by a left_edge, a right_edge, and a center.  The left_edge
+      and right_edge are the minimum and maximum bounds in the three axes
+      respectively.  The center is arbitrary and must only be contained within
+      the left_edge and right_edge.  By using the ``box`` wrapper, the center
+      is assumed to be the midpoint between the left and right edges.
 
-**Ellipsoid** (aliased to :class:`~yt.data_objects.data_containers.YTEllipsoidBase`)
-    | Usage: ``ellipsoid()``
+* **Disk/Cylinder** 
+    | Class: :class:`~yt.data_objects.data_containers.YTDiskBase`
+    | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None)``
+    | A cylinder defined by a point at the center of one of the circular bases,
+      a normal vector to it defining the orientation of the length of the
+      cylinder, and radius and height values for the cylinder's dimensions.
 
-**All Data** (aliased to :class:`~yt.data_objects.data_containers.YTRegionBase`)
-    | Usage: ``all_data()``
+* **Ellipsoid** 
+    | Class :class:`~yt.data_objects.data_containers.YTEllipsoidBase`
+    | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None)``
+    | An ellipsoid with axis magnitudes set by semi_major_axis_length,
+      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector
+      sets the direction of the semi_major_axis.  tilt defines the orientation
+      of the semi_medium and semi_minor axes.
 
-Filtering and Grouping Objects
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+* **Sphere** 
+    | Class :class:`~yt.data_objects.data_containers.YTSphereBase`
+    | Usage: ``sphere(center, radius, ds=None, field_parameters=None)``
+    | A sphere defined by a central coordinate and a radius.
 
-**Boolean Regions** (Note: not yet implemented in yt 3.0)
+
+Filtering and Collection Objects
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See also the section on :ref:`filtering-data`.
+
+* **Boolean Regions** 
+    | **Note: not yet implemented in yt 3.0**
     | Usage: ``boolean()``
+    | See :ref:`boolean-data-objects`.
 
-**Mesh Field Filter** (aliased to :class:`~yt.data_objects.data_containers.YTCutRegionBase`)
-    | Usage: ``cut_region()``
+* **Mesh Field Filter** 
+    | Class :class:`~yt.data_objects.data_containers.YTCutRegionBase`
+    | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
+    | A ``cut_region`` is a filter that can be applied to any other data 
+      object.  The filter is defined by a list of conditional expressions;
+      these operate only on mesh fields and determine which cells remain in
+      the object.  For more detailed information and examples, see
+      :ref:`cut-regions`; a short sketch also follows this list.
 
-**Collection of Data Objects** (aliased to :class:`~yt.data_objects.data_containers.YTDataCollectionBase`)
-    | Usage: ``data_collection()``
+* **Collection of Data Objects** 
+    | Class :class:`~yt.data_objects.data_containers.YTDataCollectionBase`
+    | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
+    | A ``data_collection`` is a list of data objects that can be 
+      sampled and processed as a whole in a single data object.
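+
+For instance, a minimal ``cut_region`` sketch (assuming the
+``IsolatedGalaxy`` sample dataset):
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   ad = ds.all_data()
+   # Keep only the cells with temperatures between 1e5 K and 1e7 K.
+   warm = ad.cut_region(['obj["temperature"] > 1e5',
+                         'obj["temperature"] < 1e7'])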
 
-Data Product Objects
+Construction Objects
 ^^^^^^^^^^^^^^^^^^^^
 
-**Streamline** (aliased to :class:`~yt.data_objects.data_containers.YTStreamlineBase`)
-    | Usage: ``streamline()``
+* **Fixed-Resolution Region** 
+    | Class :class:`~yt.data_objects.data_containers.YTCoveringGridBase`
+    | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
+    | A 3D region with all data extracted to a single, specified resolution.
+      See :ref:`examining-grid-data-in-a-fixed-resolution-array`.  A short
+      sketch of this and one other construction object follows this list.
 
-**Projection** (aliased to :class:`~yt.data_objects.data_containers.YTQuadTreeProjBase`)
-    | Usage: ``proj()``
+* **Fixed-Resolution Region with Smoothing** 
+    | Class :class:`~yt.data_objects.data_containers.YTSmoothedCoveringGridBase`
+    | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
+    | A 3D region with all data extracted and interpolated to a single, 
+      specified resolution.  Identical to covering_grid, except that it 
+      interpolates.  See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-**Fixed-Resolution Region** (aliased to :class:`~yt.data_objects.data_containers.YTCoveringGridBase`)
-    | Usage: ``covering_grid()``
+* **Fixed-Resolution Region for Particle Deposition** 
+    | Class :class:`~yt.data_objects.data_containers.YTArbitraryGridBase`
+    | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
+    | When particles are deposited on to mesh fields, they use the existing
+      mesh structure, but this may have too much or too little resolution
+      relative to the particle locations (or it may not exist at all!).  An
+      ``arbitrary_grid`` provides a means for generating a new independent mesh 
+      structure for particle deposition.  See :ref:`arbitrary-grid` for more 
+      information.
 
-**Fixed-Resolution Region with Smoothing** (aliased to :class:`~yt.data_objects.data_containers.YTSmoothedCoveringGridBase`)
-    | Usage: ``smoothed_covering_grid()``
+* **Projection** 
+    | Class :class:`~yt.data_objects.data_containers.YTQuadTreeProjBase`
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | A 2D projection of a 3D volume along one of the axis directions.  
+      By default, this is a line integral through the entire simulation volume 
+      (although it can be a subset of that volume specified by a data object
+      with the ``data_source`` keyword).  Alternatively, one can specify 
+      a ``weight_field`` and different ``style`` values to change the nature
+      of the projection outcome.  See :ref:`projection-types` for more information.
 
-**Fixed-Resolution Region for Particle Deposition** (aliased to :class:`~yt.data_objects.data_containers.YTArbitraryGridBase `)
-    | Usage: ``arbitrary_grid()``
+* **Streamline** 
+    | Class :class:`~yt.data_objects.data_containers.YTStreamlineBase`
+    | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
+    | A ``streamline`` can be traced out by identifying a starting coordinate (or 
+      list of coordinates) and allowing it to trace a vector field, like gas
+      velocity.  See :ref:`streamlines` for more information.
 
-**Surface** (aliased to :class:`~yt.data_objects.data_containers.YTSurfaceBase`)
-    | Usage: ``surface()``
+* **Surface** 
+    | Class :class:`~yt.data_objects.data_containers.YTSurfaceBase`
+    | Usage: ``surface(data_source, field, field_value)``
+    | The surface defined by an isocontour in any mesh field.  An existing 
+      data object must be provided as the source, as well as a mesh field
+      and the value of that field at which to draw the isocontour.  See 
+      :ref:`extracting-isocontour-information`.
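+
+A minimal sketch (assuming the ``IsolatedGalaxy`` sample dataset) of two of
+these construction objects:
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   # Extract a 64^3 fixed-resolution block of data at level 2, anchored
+   # at the domain's left edge.
+   cg = ds.covering_grid(2, [0.0, 0.0, 0.0], [64, 64, 64])
+   # A line integral of density through the full volume along the x axis.
+   prj = ds.proj("density", 0)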
 
+.. _derived-quantities:
 
+Processing Objects: Derived Quantities
+--------------------------------------
 
-The following objects are available, all of which hang off of the index
-object.  To access them, you would do something like this (as for a
-:class:`region`):
+Derived quantities are a way of operating on a collection of cells and
+returning a set of values that is fewer in number than the number of cells --
+yt already knows about several.  Every data object (see
+:ref:`data-objects`) provides a mechanism for access to derived quantities.
+These can be accessed via the ``quantities`` interface, like so:
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("RedshiftOutput0005")
-   reg = ds.region([0.5, 0.5, 0.5], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
+   ds = load("my_data")
+   dd = ds.all_data()
+   dd.quantities.angular_momentum_vector()
 
+The following quantities are available via the ``quantities`` interface.
+
+.. include:: _dq_docstrings.inc
+
+Creating Custom Derived Quantities
+++++++++++++++++++++++++++++++++++
+
+The basic idea is that you need to be able to operate both on a set of data
+and on a set of sets of data.  (If this is not possible, the quantity needs to be
+added with the ``force_unlazy`` option.)
+
+Two functions are necessary.  One will operate on arrays of data, either fed
+from each grid individually or fed from the entire data object at once.  The
+second one takes the results of the first, either as lists of arrays or as
+single arrays, and returns the final values.  For an example, we look at the
+``TotalMass`` function:
+
+.. code-block:: python
+
+   def _TotalMass(data):
+       # Operates on arrays of data, either per grid or for the whole
+       # object at once: sum up the gas and particle masses.
+       baryon_mass = data["cell_mass"].sum()
+       particle_mass = data["ParticleMassMsun"].sum()
+       return baryon_mass, particle_mass
+   def _combTotalMass(data, baryon_mass, particle_mass):
+       # Collates the per-grid results into the final totals.
+       return baryon_mass.sum() + particle_mass.sum()
+   add_quantity("TotalMass", function=_TotalMass,
+                combine_function=_combTotalMass, n_ret=2)
+
+Once the two functions have been defined, we then call :func:`add_quantity` to
+tell it the function that defines the data, the collator function, and the
+number of values that get passed between them.  In this case we return both the
+particle and the baryon mass, so we have two total values passed from the main
+function into the collator.
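+
+Once registered, the new quantity is available through the same
+``quantities`` interface as the built-ins.  A hypothetical sketch, assuming
+``ds`` from the earlier example and the registration above:
+
+.. code-block:: python
+
+   dd = ds.all_data()
+   # Look the quantity up by the name it was registered under.
+   total_mass = dd.quantities["TotalMass"]()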
 
 .. _arbitrary-grid:
 
-Arbitrary Grids
----------------
+Arbitrary Grids for Particle Deposition
+---------------------------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -218,102 +358,10 @@
 Please see the :ref:`cookbook` for some examples of how to use the boolean
 data object.
 
-.. _derived-quantities:
-
-Processing Objects: Derived Quantities
---------------------------------------
-
-Derived quantities are a way of operating on a collection of cells and
-returning a set of values that is fewer in number than the number of cells --
-yt already knows about several.  Every 3D data object (see
-:ref:`using-objects`) provides a mechanism for access to derived quantities.
-These can be accessed via the ``quantities`` interface, like so:
-
-.. code-block:: python
-
-   ds = load("my_data")
-   dd = ds.all_data()
-   dd.quantities.angular_momentum_vector()
-
-The following quantities are available via the ``quantities`` interface.
-
-.. include:: _dq_docstrings.inc
-
-Creating Derived Quantities
-+++++++++++++++++++++++++++
-
-The basic idea is that you need to be able to operate both on a set of data,
-and a set of sets of data.  (If this is not possible, the quantity needs to be
-added with the ``force_unlazy`` option.)
-
-Two functions are necessary.  One will operate on arrays of data, either fed
-from each grid individually or fed from the entire data object at once.  The
-second one takes the results of the first, either as lists of arrays or as
-single arrays, and returns the final values.  For an example, we look at the
-``TotalMass`` function:
-
-.. code-block:: python
-
-   def _TotalMass(data):
-       baryon_mass = data["cell_mass"].sum()
-       particle_mass = data["ParticleMassMsun"].sum()
-       return baryon_mass, particle_mass
-   def _combTotalMass(data, baryon_mass, particle_mass):
-       return baryon_mass.sum() + particle_mass.sum()
-   add_quantity("TotalMass", function=_TotalMass,
-                combine_function=_combTotalMass, n_ret = 2)
-
-Once the two functions have been defined, we then call :func:`add_quantity` to
-tell it the function that defines the data, the collator function, and the
-number of values that get passed between them.  In this case we return both the
-particle and the baryon mass, so we have two total values passed from the main
-function into the collator.
-
-.. _field_cuts:
-
-Cutting Objects by Field Values
--------------------------------
-
-Data objects can be cut by their field values using the ``cut_region`` 
-method.  For example, this could be used to compute the total gas mass within
-a certain temperature range, as in the following example.
-
-.. notebook-cell::
-
-   import yt
-   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = ds.all_data()
-   total_mass = ad.quantities.total_quantity('cell_mass')
-   # now select only gas with 1e5 K < T < 1e7 K.
-   new_region = ad.cut_region(['obj["temperature"] > 1e5',
-                               'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities.total_quantity('cell_mass')
-   print "The fraction of mass in this temperature range is %f." % \
-     (cut_mass / total_mass)
-
-The ``cut_region`` function generates a new object containing only the cells 
-that meet all of the specified criteria.  The sole argument to ``cut_region`` 
-is a list of strings, where each string is evaluated with an ``eval`` 
-statement.  ``eval`` is a native Python function that evaluates a string as 
-a Python expression.  Any type of data object can be cut with ``cut_region``.  
-Objects generated with ``cut_region`` can be used in the same way as all 
-other data objects.  For example, a cut region can be visualized by giving 
-it as a data_source to a projection.
-
-.. python-script::
-
-   import yt
-   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = ds.all_data()
-   new_region = ad.cut_region(['obj["density"] > 1e-29'])
-   plot = yt.ProjectionPlot(ds, "x", "density", weight_field="density",
-                            data_source=new_region)
-   plot.save()
-
 .. _extracting-connected-sets:
 
-Connected Sets
---------------
+Connected Sets and Clump Finding
+--------------------------------
 
 The underlying machinery used in :ref:`clump_finding` is accessible from any
 data object.  This includes the ability to obtain and examine topologically
@@ -343,8 +391,8 @@
 
 .. _extracting-isocontour-information:
 
-Extracting Isocontour Information
----------------------------------
+Surfaces and Extracting Isocontour Information
+----------------------------------------------
 
 ``yt`` contains an implementation of the `Marching Cubes
 <http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm, which can operate on
@@ -384,16 +432,14 @@
 has a separate set of serialization operations for 2D objects such as
 projections.
 
-.. _parameter_file_serialization:
-
-``yt`` will save out 3D objects to disk under the presupposition that the
+``yt`` will save out objects to disk under the presupposition that the
 construction of the objects is the difficult part, rather than the generation
 of the data -- this means that you can save out an object as a description of
 how to recreate it in space, but not the actual data arrays affiliated with
 that object.  The information that is saved includes the dataset off of
 which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a parameter
-file from whatever limited information it has in the save file.
+difficult; the object, when reloaded, must be able to reconstruct a dataset
+from whatever limited information it has in the save file.
 
 To do this, ``yt`` is able to identify datasets based on a "hash"
 generated from the base file name, the "CurrentTimeIdentifier", and the
@@ -402,45 +448,19 @@
 conjunction they should be uniquely identifying.  (This process is all done in
 :mod:`~yt.utilities.ParameterFileStorage` via :class:`~yt.utilities.ParameterFileStorage.ParameterFileStore`.)
 
-To save an object, you can either save it in the ``.yt`` file affiliated with
-the index or as a standalone file.  For instance, using
-:meth:`~yt.data_objects.index.save_object` we can save a sphere.
+You can save objects to an output file using the function 
+:meth:`~yt.data_objects.index.save_object`: 
 
 .. code-block:: python
 
    import yt
    ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
+   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
+   sp.save_object("sphere_name", "save_file.cpkl")
 
-   ds.save_object(sp, "sphere_to_analyze_later")
-
-
-In a later session, we can load it using
-:meth:`~yt.data_objects.index.load_object`:
-
-.. code-block:: python
-
-   import yt
-
-   ds = yt.load("my_data")
-   sphere_to_analyze = ds.load_object("sphere_to_analyze_later")
-
-Additionally, if we want to store the object independent of the ``.yt`` file,
-we can save the object directly:
-
-.. code-block:: python
-
-   import yt
-
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
-
-   sp.save_object("my_sphere", "my_storage_file.cpkl")
-
-This will store the object as ``my_sphere`` in the file
-``my_storage_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  Note that if a filename is not supplied, it will
-be saved via the index, as above.
+This will store the object as ``sphere_name`` in the file
+``save_file.cpkl``, which will be created or accessed using the standard
+python module :mod:`shelve`.  
 
 To re-load an object saved this way, you can use the shelve module directly:
 
@@ -448,17 +468,12 @@
 
    import yt
    import shelve
+   ds = yt.load("my_data") 
+   saved_fn = shelve.open("save_file.cpkl")
+   ds, sp = saved_fn["sphere_name"]
 
-   ds = yt.load("my_data") # not necessary if storeparameterfiles is on
-
-   obj_file = shelve.open("my_storage_file.cpkl")
-   ds, obj = obj_file["my_sphere"]
-
-If you have turned on ``storeparameterfiles`` in your configuration,
-you won't need to load the parameterfile again, as the load process
-will actually do that for you in that case.  Additionally, we can
-store multiple objects in a single shelve file, so we have to call the
-sphere by name.
+Additionally, we can store multiple objects in a single shelve file, so we 
+have to call the sphere by name.
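+
+For example, a minimal sketch of storing a second object in the same file
+(the names here are purely illustrative):
+
+.. code-block:: python
+
+   sp2 = ds.sphere([0.25, 0.25, 0.25], (5.0, 'kpc'))
+   sp2.save_object("second_sphere", "save_file.cpkl")
+   # Both objects now live in save_file.cpkl, keyed by name.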
 
 .. note:: It's also possible to use the standard :mod:`cPickle` module for
           loading and storing objects -- so in theory you could even save a
@@ -466,70 +481,3 @@
 
 This method works for clumps, as well, and the entire clump index will be
 stored and restored upon load.
-
-.. _accessing-fields:
-
-Accessing Fields in Objects
----------------------------
-
-``yt`` utilizes load-on-demand objects to represent physical regions in space.
-(see :ref:`how-yt-thinks-about-data`.)  Data objects in ``yt`` all respect the following
-protocol for accessing data:
-
-.. code-block:: python
-
-   my_object["density"]
-
-where ``"density"`` can be any field name and ``"my_object"`` any one of
-the possible data containers listed at :ref:`available-objects`. For
-example, if we wanted to look at the temperature of cells within a
-spherical region of radius 10 kpc, centered at [0.5, 0.5, 0.5] in our
-simulation box, we would create a sphere object with:
-
-.. code-block:: python
-
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
-
-and then look at the temperature of its cells within it via:
-
-.. code-block:: python
-
-   print sp["temperature"]
-
-Information about how to create a new type of object can be found in
-:ref:`creating-objects`. The field is returned as a single, flattened
-array without spatial information.  The best mechanism for
-manipulating spatial data is the :class:`~yt.data_objects.data_containers.AMRCoveringGridBase` object.
-
-The full list of fields that are available can be found as a property of the
-Hierarchy or Static Output object that you wish to access.  This property is
-calculated every time the object is instantiated.  The full list of fields that
-have been identified in the output file, which need no processing (besides unit
-conversion) are in the property ``field_list`` and the full list of
-potentially-accessible derived fields is available in the property
-``derived_field_list``.  You can see these by examining the two properties:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_list
-   print ds.derived_field_list
-
-When a field is added, it is added to a container that hangs off of the
-dataset, as well.  All of the field creation options
-(:ref:`derived-field-options`) are accessible through this object:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_info["pressure"].get_units()
-
-This is a fast way to examine the units of a given field, and additionally you
-can use :meth:`yt.utilities.pydot.get_source` to get the source code:
-
-.. code-block:: python
-
-   field = ds.field_info["pressure"]
-   print field.get_source()
-
-


https://bitbucket.org/yt_analysis/yt/commits/d8858ae597df/
Changeset:   d8858ae597df
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:30:41
Summary:     Updating the TotalMass docstrings to be accurate.
Affected #:  1 file

diff -r d2148f257d90a527d0fb77118c30669a3cdf7a16 -r d8858ae597df80ee1a8d85657ff76ae116acb42e yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -171,7 +171,8 @@
 
 class TotalMass(TotalQuantity):
     r"""
-    Calculates the total mass in gas and particles.
+    Calculates the total mass in gas and particles. Returns a tuple where the
+    first element is the total gas mass and the second is the total particle mass.
 
     Examples
     --------


https://bitbucket.org/yt_analysis/yt/commits/d2901c5be3fa/
Changeset:   d2901c5be3fa
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:31:04
Summary:     Updating the Data Objects docstrings to be accurate.
Affected #:  1 file

diff -r d8858ae597df80ee1a8d85657ff76ae116acb42e -r d2901c5be3fac3817e93773dae3c57c373450d88 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -48,10 +48,17 @@
         periodic its position will be corrected to lie inside
         the range [DLE,DRE) to ensure one and only one cell may
         match that point
+    ds : Dataset, optional
+        An optional dataset to use rather than self.ds
+    field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+        fields.
 
     Examples
     --------
-    >>> ds = load("DD0010/moving7_0010")
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
     >>> point = ds.point(c)
     """
@@ -80,14 +87,17 @@
         that this is in the plane coordinates: so if you are casting along
         x, this will be (y,z).  If you are casting along y, this will be
         (x,z).  If you are casting along z, this will be (x,y).
-    fields : list of strings, optional
-        If you want the object to pre-retrieve a set of fields, supply them
-        here.  This is not necessary.
+    ds : Dataset, optional
+        An optional dataset to use rather than self.ds
+    field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+        fields.
 
     Examples
     --------
 
-    >>> ds = load("RedshiftOutput0005")
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> oray = ds.ortho_ray(0, (0.2, 0.74))
     >>> print oray["Density"]
     """
@@ -129,14 +139,17 @@
         The place where the ray starts.
     end_point : array-like set of 3 floats
         The place where the ray ends.
-    fields : list of strings, optional
-        If you want the object to pre-retrieve a set of fields, supply them
-        here.  This is not necessary.
+    ds : Dataset, optional
+        An optional dataset to use rather than self.ds
+    field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+        fields.
 
     Examples
     --------
 
-    >>> ds = load("RedshiftOutput0005")
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31))
     >>> print ray["Density"], ray["t"], ray["dts"]
     """
@@ -170,7 +183,7 @@
     domain.
 
     This object is typically accessed through the `slice` object that hangs
-    off of index objects.  AMRSlice is an orthogonal slice through the
+    off of index objects.  Slice is an orthogonal slice through the
     data, taking all the points at the finest resolution available and then
     indexing them.  It is more appropriately thought of as a slice
     'operator' than an object, however, as its field and coordinate can
@@ -185,7 +198,7 @@
         "domain" coordinates.
     center : array_like, optional
         The 'center' supplied to fields that use it.  Note that this does
-        not have to have `coord` as one value.  Strictly optional.
+        not have to have `coord` as one value.  Optional.
     ds: Dataset, optional
         An optional dataset to use rather than self.ds
     field_parameters : dictionary
@@ -195,7 +208,8 @@
     Examples
     --------
 
-    >>> ds = load("RedshiftOutput0005")
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> slice = ds.slice(0, 0.25)
     >>> print slice["Density"]
     """
@@ -250,9 +264,9 @@
     simulation domain.
 
     This object is typically accessed through the `cutting` object
-    that hangs off of index objects.  AMRCuttingPlane is an oblique
+    that hangs off of index objects.  A cutting plane is an oblique
     plane through the data, defined by a normal vector and a coordinate.
-    It attempts to guess an 'up' vector, which cannot be overridden, and
+    It attempts to guess a 'north' vector, which can be overridden, and
     then it pixelizes the appropriate data onto the plane without
     interpolation.
 
@@ -261,14 +275,16 @@
     normal : array_like
         The vector that defines the desired plane.  For instance, the
         angular momentum of a sphere.
-    center : array_like, optional
-        The center of the cutting plane.
-    fields : list of strings, optional
-        If you want the object to pre-retrieve a set of fields, supply them
-        here.  This is not necessary.
-    node_name: string, optional
-        The node in the .yt file to find or store this slice at.  Should
-        probably not be used.
+    center : array_like
+        The center of the cutting plane, where the normal vector is anchored.
+    north_vector : array_like, optional
+        An optional vector to describe the north-facing direction in the
+        resulting plane.
+    ds : Dataset, optional
+        An optional dataset to use rather than self.ds
+    field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+        fields.
 
     Notes
     -----
@@ -281,7 +297,8 @@
     Examples
     --------
 
-    >>> ds = load("RedshiftOutput0005")
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6])
     >>> print cp["Density"]
     """
@@ -292,8 +309,8 @@
     _con_args = ('normal', 'center')
     _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz")
 
-    def __init__(self, normal, center, ds = None,
-                 north_vector = None, field_parameters = None):
+    def __init__(self, normal, center, north_vector = None, 
+                 ds = None, field_parameters = None):
         YTSelectionContainer2D.__init__(self, 4, ds, field_parameters)
         self._set_center(center)
         self.set_field_parameter('center',center)
@@ -325,7 +342,7 @@
         variable-resolution 2D object and transforms it into an NxM bitmap that
         can be plotted, examined or processed.  This is a convenience function
         to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other AMR2DData objects, this does
+        corresponding to_frb function for other 2DData objects, this does
         not accept a 'center' parameter as it is assumed to be centered at the
         center of the cutting plane.
 
@@ -446,7 +463,7 @@
         variable-resolution 2D object and transforms it into an NxM bitmap that
         can be plotted, examined or processed.  This is a convenience function
         to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other AMR2DData objects, this does
+        corresponding to_frb function for other 2DData objects, this does
         not accept a 'center' parameter as it is assumed to be centered at the
         center of the cutting plane.
 
@@ -496,6 +513,34 @@
     By providing a *center*, a *normal*, a *radius* and a *height* we
     can define a cylinder of any proportion.  Only cells whose centers are
     within the cylinder will be selected.
+
+    Parameters
+    ----------
+    center : array_like 
+        the coordinate to which the normal, radius, and height all refer;
+        it lies at the center of one of the circular bases of the cylinder
+    normal : array_like
+        the normal vector defining the direction of the lengthwise part of 
+        cylinder
+    radius : float
+        the radius of the cylinder
+    height : float
+        the height of the lengthwise part of the cylinder
+    fields : array of fields, optional
+        any fields to be pre-loaded in the cylinder object
+    ds : Dataset, optional
+        An optional dataset to use rather than self.ds
+    field_parameters : dictionary
+        A dictionary of field parameters that can be accessed by derived
+        fields.
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
+    >>> c = [0.5,0.5,0.5]
+    >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc'))
     """
     _type_name = "disk"
     _con_args = ('center', '_norm_vec', '_radius', '_height')
@@ -508,7 +553,6 @@
         self._radius = fix_length(radius, self.ds)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
-
 class YTRegionBase(YTSelectionContainer3D):
     """A 3D region of data with an arbitrary center.
 
@@ -567,7 +611,9 @@
 
     Examples
     --------
-    >>> ds = load("DD0010/moving7_0010")
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
     >>> sphere = ds.sphere(c,1.*ds['kpc'])
     """
@@ -594,11 +640,11 @@
     center : array_like
         The center of the ellipsoid.
     A : float
-        The magnitude of the largest semi-major axis of the ellipsoid.
+        The magnitude of the largest axis (semi-major) of the ellipsoid.
     B : float
-        The magnitude of the medium semi-major axis of the ellipsoid.
+        The magnitude of the medium axis (semi-medium) of the ellipsoid.
     C : float
-        The magnitude of the smallest semi-major axis of the ellipsoid.
+        The magnitude of the smallest axis (semi-minor) of the ellipsoid.
     e0 : array_like (automatically normalized)
         the direction of the largest semi-major axis of the ellipsoid
     tilt : float
@@ -609,7 +655,9 @@
         the z-axis.
     Examples
     --------
-    >>> ds = load("DD####/DD####")
+
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> c = [0.5,0.5,0.5]
     >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2)
     """
@@ -663,7 +711,7 @@
 class YTCutRegionBase(YTSelectionContainer3D):
     """
     This is a data object designed to allow individuals to apply logical
-    operations to fields or particles and filter as a result of those cuts.
+    operations to fields and filter as a result of those cuts.
 
     Parameters
     ----------
@@ -678,7 +726,8 @@
     Examples
     --------
 
-    >>> ds = load("DD0010/moving7_0010")
+    >>> import yt
+    >>> ds = yt.load("RedshiftOutput0005")
     >>> sp = ds.sphere("max", (1.0, 'mpc'))
     >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"])
     """


https://bitbucket.org/yt_analysis/yt/commits/3c7330217ef6/
Changeset:   3c7330217ef6
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 20:39:50
Summary:     Merging.
Affected #:  5 files

diff -r d2901c5be3fac3817e93773dae3c57c373450d88 -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -69,12 +69,3 @@
 .. yt_cookbook:: derived_field.py
 
 .. _cookbook-complex-derived-fields:
-
-Complex Derived Fields
-~~~~~~~~~~~~~~~~~~~~~~
-
-This recipe estimates the ratio of gravitational and pressure forces in a galaxy
-cluster simulation.  This shows how to create and work with vector derived 
-fields.
-
-.. yt_cookbook:: hse_field.py

diff -r d2901c5be3fac3817e93773dae3c57c373450d88 -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -6,7 +6,7 @@
 ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
 data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], 
-                      (1, 'kpc'), (1, 'kpc'))
+                      (8, 'kpc'), (1, 'kpc'))
 
 # the field to be used for contouring
 field = ("gas", "density")

diff -r d2901c5be3fac3817e93773dae3c57c373450d88 -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 doc/source/help/index.rst
--- a/doc/source/help/index.rst
+++ b/doc/source/help/index.rst
@@ -141,7 +141,7 @@
 
  * Identify which version of the code you’re using. 
 
-   * ``$ yt instinfo`` - provides version information, including changeset hash
+   * ``$ yt version`` - provides version information, including changeset hash
 
 It may be that through the mere process of doing this, you end up solving 
 the problem!

diff -r d2901c5be3fac3817e93773dae3c57c373450d88 -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -11,6 +11,19 @@
 In this document we describe several methods for installing yt. The method that
 will work best for you depends on your precise situation:
 
+* If you do not have root access on your computer, are not comfortable managing
+  python packages, or are working on a supercomputer or cluster computer, you
+  will probably want to use the bash installation script.  This builds python,
+  numpy, matplotlib, and yt from source to set up an isolated scientific python
+  environment inside of a single folder in your home directory. See
+  :ref:`install-script` for more details.
+
+* If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
+  distribution see :ref:`anaconda-installation` for details on how to install
+  yt using the ``conda`` package manager.  Source-based installation from the
+  mercurial repository or via ``pip`` should also work under Anaconda. Note that
+  this is currently the only supported installation mechanism on Windows.
+
 * If you already have a scientific python software stack installed on your
   computer and are comfortable installing python packages,
   :ref:`source-installation` will probably be the best choice. If you have set
@@ -21,136 +34,6 @@
  have the necessary compilers installed (e.g. the ``build-essentials``
   package on debian and ubuntu).
 
-* If you use the `Anaconda <https://store.continuum.io/cshop/anaconda/>`_ python
-  distribution see :ref:`anaconda-installation` for details on how to install
-  yt using the ``conda`` package manager.  Source-based installation from the
-  mercurial repository or via ``pip`` should also work under Anaconda. Note that
-  this is currently the only supported installation mechanism on Windows.
-
-* If you do not have root access on your computer, are not comfortable managing
-  python packages, or are working on a supercomputer or cluster computer, you
-  will probably want to use the bash installation script.  This builds python,
-  numpy, matplotlib, and yt from source to set up an isolated scientific python
-  environment inside of a single folder in your home directory. See
-  :ref:`install-script` for more details.
-
-.. _source-installation:
-
-Installing yt Using pip or from Source
-++++++++++++++++++++++++++++++++++++++
-
-To install yt from source, you must make sure you have yt's dependencies
-installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
-``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
-can use ``pip`` (which comes with ``Python``) to install the latest stable
-version of yt:
-
-.. code-block:: bash
-
-  $ pip install yt
-
-The source code for yt may be found at the Bitbucket project site and can also
-be utilized for installation. If you prefer to install the development version
-of yt instead of the latest stable release, you will need ``mercurial`` to clone
-the official repo:
-
-.. code-block:: bash
-
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py install --user
-
-This will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
-Please refer to ``setuptools`` documentation for the additional options.
-
-If you will be modifying yt, you can also make the clone of the yt mercurial
-repository the "active" installed copy:
-
-..code-block:: bash
-
-  hg clone https://bitbucket.org/yt_analysis/yt
-  cd yt
-  hg update yt
-  python setup.py develop  
-
-If you choose this installation method, you do not need to run any activation
-script since this will install yt into your global python environment.
-
-Keeping yt Updated via Mercurial
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-If you want to maintain your yt installation via updates straight from the
-Bitbucket repository or if you want to do some development on your own, we
-suggest you check out some of the :ref:`development docs <contributing-code>`,
-especially the sections on :ref:`Mercurial <mercurial-with-yt>` and
-:ref:`building yt from source <building-yt>`.
-
-You can also make use of the following command to keep yt up to date from the
-command line:
-
-.. code-block:: bash
-
-  yt update
-
-This will detect that you have installed yt from the mercurial repository, pull
-any changes from bitbucket, and then recompile yt if necessary.
-
-.. _anaconda-installation:
-
-Installing yt Using Anaconda
-++++++++++++++++++++++++++++
-
-Perhaps the quickest way to get yt up and running is to install it using the
-`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
-which will provide you with a easy-to-use environment for installing Python
-packages.
-
-If you do not want to install the full anaconda python distribution, you can
-install a bare-bones Python installation using miniconda.  To install miniconda,
-visit http://repo.continuum.io/miniconda/ and download a recent version of the
-``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
-system architecture. Next, run the script, e.g.:
-
-.. code-block:: bash
-
-  bash Miniconda-3.3.0-Linux-x86_64.sh
-
-Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
-
-.. code-block:: bash
-
-  conda install yt
-
-which will install yt along with all of its dependencies.
-
-Recipes to build conda packages for yt are available at
-https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
-clone the conda-recipes repository
-
-.. code-block:: bash
-
-  git clone https://github.com/conda/conda-recipes
-
-Then navigate to the repository root and invoke `conda build`:
-
-.. code-block:: bash
-
-  cd conda-recipes
-  conda build ./yt/
-
-Note that building a yt conda package requires a C compiler.
-
-.. _windows-installation:
-
-Installing yt on Windows
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-Installation on Microsoft Windows is only supported for Windows XP Service Pack
-3 and higher (both 32-bit and 64-bit) using Anaconda, see
-:ref:`anaconda-installation`.  Also see :ref:`windows-developing` for details on
-how to build yt from source in Windows.
-
 .. _install-script:
 
 All-in-one installation script
@@ -265,6 +148,168 @@
 code, this is a last defense for solving: remove and then fully
 :ref:`re-install <installing-yt>` from the install script again.
 
+.. _anaconda-installation:
+
+Installing yt Using Anaconda
+++++++++++++++++++++++++++++
+
+Perhaps the quickest way to get yt up and running is to install it using the
+`Anaconda Python Distribution <https://store.continuum.io/cshop/anaconda/>`_,
+which will provide you with an easy-to-use environment for installing Python
+packages.
+
+If you do not want to install the full anaconda python distribution, you can
+install a bare-bones Python installation using miniconda.  To install miniconda,
+visit http://repo.continuum.io/miniconda/ and download a recent version of the
+``Miniconda-x.y.z`` script (corresponding to Python 2.7) for your platform and
+system architecture. Next, run the script, e.g.:
+
+.. code-block:: bash
+
+  bash Miniconda-3.3.0-Linux-x86_64.sh
+
+Make sure that the Anaconda ``bin`` directory is in your path, and then issue:
+
+.. code-block:: bash
+
+  conda install yt
+
+which will install yt along with all of its dependencies.
+
+Recipes to build conda packages for yt are available at
+https://github.com/conda/conda-recipes.  To build the yt conda recipe, first
+clone the conda-recipes repository
+
+.. code-block:: bash
+
+  git clone https://github.com/conda/conda-recipes
+
+Then navigate to the repository root and invoke `conda build`:
+
+.. code-block:: bash
+
+  cd conda-recipes
+  conda build ./yt/
+
+Note that building a yt conda package requires a C compiler.
+
+.. _windows-installation:
+
+Installing yt on Windows
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+Installation on Microsoft Windows is only supported for Windows XP Service Pack
+3 and higher (both 32-bit and 64-bit) using Anaconda, see
+:ref:`anaconda-installation`.  Also see :ref:`windows-developing` for details on
+how to build yt from source in Windows.
+
+.. _source-installation:
+
+Installing yt Using pip or from Source
+++++++++++++++++++++++++++++++++++++++
+
+To install yt from source, you must make sure you have yt's dependencies
+installed on your system.  These include: a C compiler, ``HDF5``, ``python``,
+``Cython``, ``NumPy``, ``matplotlib``, ``sympy``, and ``h5py``. From here, you
+can use ``pip`` (which comes with ``Python``) to install the latest stable
+version of yt:
+
+.. code-block:: bash
+
+  $ pip install yt
+
+The source code for yt may be found at the Bitbucket project site and can also
+be utilized for installation. If you prefer to install the development version
+of yt instead of the latest stable release, you will need ``mercurial`` to clone
+the official repo:
+
+.. code-block:: bash
+
+  hg clone https://bitbucket.org/yt_analysis/yt
+  cd yt
+  hg update yt
+  python setup.py install --user
+
+.. note::
+
+  If you maintain your own python installation separate from the OS-level python
+  installation, you can leave off ``--user --prefix=``, although you might need
+  ``sudo`` depending on where python is installed. See `this StackOverflow
+  discussion
+  <http://stackoverflow.com/questions/4495120/combine-user-with-prefix-error-with-setup-py-install>`_
+  if you are curious why ``--prefix=`` is necessary on some systems.
+
+This will install yt into a folder in your home directory
+(``$HOME/.local/lib64/python2.7/site-packages`` on Linux,
+``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX).  Please refer to
+the ``setuptools`` documentation for the additional options.
+
+If you choose this installation method, you do not need to run any activation
+script since this will install yt into your global python environment.
+
+If you will be modifying yt, you can also make the clone of the yt mercurial
+repository the "active" installed copy:
+
+.. code-block:: bash
+
+  hg clone https://bitbucket.org/yt_analysis/yt
+  cd yt
+  hg update yt
+  python setup.py develop --user
+
+As above, you can leave off ``--user`` if you want to install yt into the default
+package install path.  If you do not have write access for this location, you
+might need to use ``sudo``.
+
+Switching to yt 2.x
+^^^^^^^^^^^^^^^^^^^
+
+With the release of version 3.0 of yt, development of the legacy yt 2.x series
+has been relegated to bugfixes.  That said, we will continue supporting the 2.x
+series for the foreseeable future.  This makes it easy to use scripts written
+for older versions of yt without substantially updating them to support the
+new field naming or unit systems in yt version 3.
+
+Currently, the yt-2.x codebase is contained in a named branch in the yt
+mercurial repository.  First, remove any extant installations of yt on your
+system:
+
+.. code-block:: bash
+
+  pip uninstall yt
+
+To switch to yt-2.x, you will need to clone the mercurial repository as
+described in :ref:`source-installation`.  Next, you will need to navigate to the
+mercurial repository, update to the ``yt-2.x`` branch, and recompile:
+
+.. code-block:: bash
+
+  cd yt
+  hg update yt-2.x
+  python setup.py develop --user
+
+You can check which version of yt you have installed by invoking ``yt version``
+at the command line.
+
+Keeping yt Updated via Mercurial
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+If you want to maintain your yt installation via updates straight from the
+Bitbucket repository or if you want to do some development on your own, we
+suggest you check out some of the :ref:`development docs <contributing-code>`,
+especially the sections on :ref:`Mercurial <mercurial-with-yt>` and
+:ref:`building yt from source <building-yt>`.
+
+You can also make use of the following command to keep yt up to date from the
+command line:
+
+.. code-block:: bash
+
+  yt update
+
+This will detect that you have installed yt from the mercurial repository, pull
+any changes from bitbucket, and then recompile yt if necessary.
+
 .. _testing-installation:
 
 Testing Your Installation

diff -r d2901c5be3fac3817e93773dae3c57c373450d88 -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 doc/source/reference/command-line.rst
--- a/doc/source/reference/command-line.rst
+++ b/doc/source/reference/command-line.rst
@@ -64,7 +64,8 @@
                         (http://hub.yt-project.org/), creating a BitBucket
                         repo in the process if necessary.
     instinfo            Get some information about the yt installation
-    version             Get some information about the yt installation
+    version             Get some information about the yt installation (this
+                        is an alias for instinfo).
     load                Load a single dataset into an IPython instance
     pastebin            Post a script to an anonymous pastebin
     pastebin_grab       Print an online pastebin to STDOUT for local use.


https://bitbucket.org/yt_analysis/yt/commits/392a066b43a3/
Changeset:   392a066b43a3
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 21:27:04
Summary:     Updating the derived quantities list in the objects docs to be accurate.
Affected #:  3 files

diff -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 -r 392a066b43a3004fb1a523dd931eb2be9c7958cf doc/source/analyzing/_dq_docstrings.inc
--- a/doc/source/analyzing/_dq_docstrings.inc
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-.. function:: angular_momentum_vector()
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.AngularMomentumVector`.)
-   This function returns the mass-weighted average angular momentum vector.
-
-
-.. function:: bulk_velocity():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.BulkVelocity`.)
-   This function returns the mass-weighted average velocity in the object.
-
-
-.. function:: center_of_mass(use_cells=True, use_particles=False):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.CenterOfMass`.)
-   This function returns the location of the center
-   of mass. By default, it computes of the *non-particle* data in the object. 
-   
-   Parameters
-   ----------
-   
-   use_cells : bool
-       If True, will include the cell mass (default: True)
-   use_particles : bool
-       if True, will include the particles in the object (default: False)
-
-
-
-.. function:: extrema(fields, non_zero=False, filter=None):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.Extrema`.)
-   This function returns the extrema of a set of fields
-   
-   :param fields: A field name, or a list of field names
-   :param filter: a string to be evaled to serve as a data filter.
-
-
-.. function:: max_location(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.max_location`.)
-   This function returns the location of the maximum of a set
-   of fields.
-
-
-.. function:: min_location(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.MinLocation`.)
-   This function returns the location of the minimum of a set
-   of fields.
-
-
-
-.. function:: spin_parameter(use_gas=True, use_particles=True):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.SpinParameter`.)
-   This function returns the spin parameter for the baryons, but it uses
-   the particles in calculating enclosed mass.
-
-
-.. function:: total_mass():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalMass`.)
-   This function takes no arguments and returns the sum of cell masses and
-   particle masses in the object.
-
-
-.. function:: total_quantity(fields):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalQuantity`.)
-   This function sums up a given field over the entire region
-   
-   :param fields: The fields to sum up
-
-
-
-.. function:: weighted_average_quantity(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`.)
-   This function returns an averaged quantity.
-   
-   :param field: The field to average
-   :param weight: The field to weight by
-
-.. function:: weighted_variance(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedVariance`.)
-    This function returns the variance of a field.
-
-    :param field: The target field
-    :param weight: The field to weight by
-
-    Returns the weighted variance and the weighted mean.

diff -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 -r 392a066b43a3004fb1a523dd931eb2be9c7958cf doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ /dev/null
@@ -1,150 +0,0 @@
-
-
-.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
-
-
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
-
-
-.. class:: cut_region(self, base_region, field_cuts, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.InLineExtractedRegionBase`.)
-
-
-.. class:: cutting(self, normal, center, fields=None, node_name=None, north_vector=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
-
-
-.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
-
-
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
-
-
-.. class:: extracted_region(self, base_region, indices, force_refresh=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.ExtractedRegionBase`.)
-
-
-.. class:: fixed_res_cutting(self, normal, center, width, dims, fields=None, node_name=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
-
-
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
-
-
-.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
-
-
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
-
-
-.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
-
-
-.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
-
-
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
-
-
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
-
-
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
-
-
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
-
-
-.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
-
-
-.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
-
-
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
-
-
-.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
-
-
-.. class:: smoothed_covering_grid(self, *args, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
-
-
-.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
-
-
-.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)
-
-
-.. class:: surface(self, data_source, surface_field, field_value):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSurfaceBase`.)

diff -r 3c7330217ef6cbcea4a5fdc8931796f01ddc93c7 -r 392a066b43a3004fb1a523dd931eb2be9c7958cf doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -88,7 +88,7 @@
 0D Objects
 """"""""""
 
-* **Point** 
+**Point** 
     | Class :class:`~yt.data_objects.data_containers.YTPointBase`    
     | Usage: ``point(coord, ds=None, field_parameters=None)``
     | A point defined by a single cell at specified coordinates.
@@ -96,14 +96,14 @@
 1D Objects
 """"""""""
 
-* **Ray (Axis-Aligned)** 
+**Ray (Axis-Aligned)** 
     | Class :class:`~yt.data_objects.data_containers.YTOrthoRayBase`
     | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None)``
     | A line (of data cells) stretching through the full domain 
       aligned with one of the x,y,z axes.  Defined by an axis and a point
       to be intersected.
 
-* **Ray (Arbitrarily-Aligned)** 
+**Ray (Arbitrarily-Aligned)** 
     | Class :class:`~yt.data_objects.data_containers.YTRayBase`
     | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None)``
     | A line (of data cells) defined by arbitrary start and end coordinates. 
@@ -111,13 +111,13 @@
 2D Objects
 """"""""""
 
-* **Slice (Axis-Aligned)** 
+**Slice (Axis-Aligned)** 
     | Class :class:`~yt.data_objects.data_containers.YTSliceBase`
     | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None)``
     | A plane normal to one of the axes and intersecting a particular 
       coordinate.
 
-* **Slice (Arbitrarily-Aligned)** 
+**Slice (Arbitrarily-Aligned)** 
     | Class :class:`~yt.data_objects.data_containers.YTCuttingPlaneBase`
     | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None)``
     | A plane normal to a specified vector and intersecting a particular 
@@ -126,13 +126,13 @@
 3D Objects
 """"""""""
 
-* **All Data** 
+**All Data** 
     | Class :func:`~yt.data_objects.static_output.Dataset.all_data`
     | Usage: ``all_data(find_max=False)``
     | ``all_data()`` is a wrapper on the Box Region class which defaults to 
       creating a Region covering the entire dataset domain.
 
-* **Box Region** 
+**Box Region** 
     | Class :class:`~yt.data_objects.data_containers.YTRegionBase`
     | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
     | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
@@ -143,14 +143,14 @@
       the left_edge and right_edge.  By using the ``box`` wrapper, the center
       is assumed to be the midpoint between the left and right edges.
 
-* **Disk/Cylinder** 
+**Disk/Cylinder** 
     | Class: :class:`~yt.data_objects.data_containers.YTDiskBase`
     | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None)``
     | A cylinder defined by point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
-* **Ellipsoid** 
+**Ellipsoid** 
     | Class :class:`~yt.data_objects.data_containers.YTEllipsoidBase`
     | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None)``
     | An ellipsoid with axis magnitudes set by semi_major_axis_length, 
@@ -158,7 +158,7 @@
      sets the direction of the semi_major_axis.  tilt defines the orientation 
      of the semi-medium and semi_minor axes.
 
-* **Sphere** 
+**Sphere** 
     | Class :class:`~yt.data_objects.data_containers.YTSphereBase`
     | Usage: ``sphere(center, radius, ds=None, field_parameters=None)``
     | A sphere defined by a central coordinate and a radius.
@@ -169,12 +169,12 @@
 
 See also the section on :ref:`filtering-data`.
 
-* **Boolean Regions** 
+**Boolean Regions** 
     | **Note: not yet implemented in yt 3.0**
     | Usage: ``boolean()``
     See :ref:`boolean-data-objects`.
 
-* **Mesh Field Filter** 
+**Mesh Field Filter** 
     | Class :class:`~yt.data_objects.data_containers.YTCutRegionBase`
     | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
     | A ``cut_region`` is a filter which can be applied to any other data 
@@ -182,7 +182,7 @@
       work on mesh fields to apply cuts to the data in the object.  For more
       detailed information and examples, see :ref:`cut-regions`.
 
-* **Collection of Data Objects** 
+**Collection of Data Objects** 
     | Class :class:`~yt.data_objects.data_containers.YTDataCollectionBase`
     | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
     | A ``data_collection`` is a list of data objects that can be 
@@ -191,20 +191,20 @@
 Construction Objects
 ^^^^^^^^^^^^^^^^^^^^
 
-* **Fixed-Resolution Region** 
+**Fixed-Resolution Region** 
     | Class :class:`~yt.data_objects.data_containers.YTCoveringGridBase`
     | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted to a single, specified resolution.
       See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-* **Fixed-Resolution Region with Smoothing** 
+**Fixed-Resolution Region with Smoothing** 
     | Class :class:`~yt.data_objects.data_containers.YTSmoothedCoveringGridBase`
     | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted and interpolated to a single, 
       specified resolution.  Identical to covering_grid, except that it 
       interpolates.  See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
-* **Fixed-Resolution Region for Particle Deposition** 
+**Fixed-Resolution Region for Particle Deposition** 
     | Class :class:`~yt.data_objects.data_containers.YTArbitraryGridBase`
     | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
     | When particles are deposited on to mesh fields, they use the existing
@@ -214,7 +214,7 @@
       structure for particle deposition.  See :ref:`arbitrary-grid` for more 
       information.
 
-* **Projection** 
+**Projection** 
     | Class :class:`~yt.data_objects.data_containers.YTQuadTreeProjBase`
     | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
     | A 2D projection of a 3D volume along one of the axis directions.  
@@ -224,14 +224,14 @@
       a weight_field and different ``style`` values to change the nature
       of the projection outcome.  See :ref:`projection-types` for more information.
 
-* **Streamline** 
+**Streamline** 
     | Class :class:`~yt.data_objects.data_containers.YTStreamlineBase`
     | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
     | A ``streamline`` can be traced out by identifying a starting coordinate (or 
       list of coordinates) and allowing it to trace a vector field, like gas
       velocity.  See :ref:`streamlines` for more information.
 
-* **Surface** 
+**Surface** 
     | Class :class:`~yt.data_objects.data_containers.YTSurfaceBase`
     | Usage: ``surface(data_source, field, field_value)``
     | The surface defined by an isocontour in any mesh field.  An existing 
@@ -256,12 +256,77 @@
    dd = ds.all_data()
    dd.quantities.angular_momentum_vector()
 
-The following quantities are available via the ``quantities`` interface.
+Available Derived Quantities
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-.. include:: _dq_docstrings.inc
+**Angular Momentum Vector**
+    | Function :func:`~yt.data_objects.derived_quantities.AngularMomentumVector`
+    | Usage: ``angular_momentum_vector(use_gas=True, use_particles=True)``
+    | This function returns the mass-weighted average angular momentum vector.
+
+**Bulk Velocity**
+    | Function :func:`~yt.data_objects.derived_quantities.BulkVelocity`
+    | Usage: ``bulk_velocity(use_gas=True, use_particles=True)``
+    | This function returns the mass-weighted average velocity in the object.
+
+**Center of Mass**
+    | Function :func:`~yt.data_objects.derived_quantities.CenterOfMass`
+    | Usage: ``center_of_mass(use_cells=True, use_particles=False)``
+    | This function returns the location of the center
+      of mass. By default, it is computed from the *non-particle* data in the object.
+
+**Extrema**
+    | Function :func:`~yt.data_objects.derived_quantities.Extrema`
+    | Usage: ``extrema(fields, non_zero=False)``
+    | This function returns the extrema of a field or list of fields.
+
+**Maximum Location**
+    | Function :func:`~yt.data_objects.derived_quantities.max_location`
+    | Usage: ``max_location(fields)``
+    | This function returns the maximum of a field or list of fields as well
+      as the x,y,z location of that maximum.
+
+**Minimum Location**
+    | Function :func:`~yt.data_objects.derived_quantities.min_location`
+    | Usage: ``min_location(fields)``
+    | This function returns the minimum of a field or list of fields as well
+      as the x,y,z location of that minimum.
+
+**Spin Parameter**
+    | Function :func:`~yt.data_objects.derived_quantities.SpinParameter`
+    | Usage: ``spin_parameter(use_gas=True, use_particles=True)``
+    | This function returns the spin parameter for the baryons, but it uses
+      the particles in calculating enclosed mass.
+
+**Total Mass**
+    | Function :func:`~yt.data_objects.derived_quantities.TotalMass`
+    | Usage: ``total_mass()``
+    | This function takes no arguments and returns a tuple containing the sum of 
+      mesh-cell masses and particle masses in the object.
+
+**Total of a Field**
+    | Function :func:`~yt.data_objects.derived_quantities.TotalQuantity`
+    | Usage: ``total_quantity(fields)``
+    | This function sums up a given field (or list of fields) over the entire 
+      region.
+
+**Weighted Average of a Field**
+    | Function :func:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`
+    | Usage: ``weighted_average_quantity(fields, weight)``
+    | This function returns a weighted average of a field (or list of fields)
+      over an entire data object.  If you want an unweighted average, 
+      then set your weight to be the field: ``ones``.
+
+**Weighted Variance of a Field**
+    | Function :func:`~yt.data_objects.derived_quantities.WeightedVariance`
+    | Usage: ``weighted_variance(fields, weight)``
+    | This function returns the weighted variance of a field (or list of fields)
+      over an entire data object and the weighted mean.  
+      If you want an unweighted variance, then 
+      set your weight to be the field: ``ones``.
 
 Creating Custom Derived Quantities
-++++++++++++++++++++++++++++++++++
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The basic idea is that you need to be able to operate both on a set of data,
 and a set of sets of data.  (If this is not possible, the quantity needs to be
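
A minimal sketch pulling together several of the usage strings above, assuming
the ``IsolatedGalaxy`` sample dataset used elsewhere in this series (the calls
mirror the documented signatures and are illustrative, not canonical):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sp = ds.sphere([0.5, 0.5, 0.5], (10, 'kpc'))           # Sphere
   dk = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                (10, 'kpc'), (1, 'kpc'))                  # Disk/Cylinder
   box = ds.box([0.25, 0.25, 0.25], [0.75, 0.75, 0.75])   # Box Region
   ray = ds.ortho_ray(0, (0.5, 0.5))                      # Axis-aligned ray
   cg = ds.covering_grid(2, [0.25, 0.25, 0.25],
                         [128, 128, 128])                 # Fixed-resolution region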


https://bitbucket.org/yt_analysis/yt/commits/539c6607bf84/
Changeset:   539c6607bf84
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 21:29:24
Summary:     Fixing header information in objects docs.
Affected #:  1 file

diff -r 392a066b43a3004fb1a523dd931eb2be9c7958cf -r 539c6607bf84cf090f3c009bfa69b934858303e0 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -357,8 +357,8 @@
 
 .. _arbitrary-grid:
 
-Arbitrary Grids for Particle Deposition
----------------------------------------
+Arbitrary Grid Objects for Particle Deposition
+-----------------------------------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -456,8 +456,8 @@
 
 .. _extracting-isocontour-information:
 
-Surfaces and Extracting Isocontour Information
-----------------------------------------------
+Surface Objects and Extracting Isocontour Information
+-----------------------------------------------------
 
 ``yt`` contains an implementation of the `Marching Cubes
 <http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm, which can operate on


https://bitbucket.org/yt_analysis/yt/commits/64b536cb8e55/
Changeset:   64b536cb8e55
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 21:38:56
Summary:     Correcting an incorrect docstring
Affected #:  1 file

diff -r 539c6607bf84cf090f3c009bfa69b934858303e0 -r 64b536cb8e5555ae76a458cebc7a06d1c30b3e36 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -208,7 +208,7 @@
     use_particles : bool
         Flag to include particles in the calculation.  Particles are ignored 
         if not present.
-        Default: True
+        Default: False
 
     Examples
     --------


https://bitbucket.org/yt_analysis/yt/commits/8502446cd030/
Changeset:   8502446cd030
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 21:44:26
Summary:     Small corrections thanks to nathan.
Affected #:  2 files

diff -r 64b536cb8e5555ae76a458cebc7a06d1c30b3e36 -r 8502446cd030d1d490e251541b9f28690fa8eaea doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -183,19 +183,22 @@
 
 When a field is added, it is added to a container that hangs off of the
 dataset, as well.  All of the field creation options
-(:ref:`derived-field-options`) are accessible through this object:
+(:ref:`derived-field-options`) are accessible through this object.  One
+can get the default units of a field by first indexing and then using the
+``get_units()`` function on the full field name (e.g. ``("gas", "pressure")``):
 
 .. code-block:: python
 
    ds = yt.load("my_data")
-   print ds.field_info["pressure"].get_units()
+   ds.index
+   print ds.field_info["gas", "pressure"].get_units()
 
 This is a fast way to examine the units of a given field, and additionally you
 can use :meth:`yt.utilities.pydot.get_source` to get the source code for a field:
 
 .. code-block:: python
 
-   field = ds.field_info["pressure"]
+   field = ds.field_info["gas", "pressure"]
    print field.get_source()
 
 Particle Fields

diff -r 64b536cb8e5555ae76a458cebc7a06d1c30b3e36 -r 8502446cd030d1d490e251541b9f28690fa8eaea doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -12,7 +12,7 @@
 of data (e.g. sphere, region--a 3D box, cylinder, etc.).  Others represent 
 data products derived from your dataset (e.g. slices, streamlines, surfaces).
 Still other data objects group multiple objects together or filter them
-(e.g. data dollection, cut region).  
+(e.g. data collection, cut region).  
 
 To generate standard plots, objects rarely need to be directly constructed.
 However, for detailed data inspection as well as hand-crafted derived data,
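
A compact, self-contained version of the ``field_info`` pattern from the
fields.rst hunk above (the dataset path is the sample used elsewhere in these
docs; any loaded dataset should behave the same way):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   ds.index  # instantiate the index so field detection has run
   field = ds.field_info["gas", "pressure"]
   print field.get_units()   # default units of the field
   print field.get_source()  # source code of the defining function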


https://bitbucket.org/yt_analysis/yt/commits/7d76c64806e9/
Changeset:   7d76c64806e9
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 22:08:08
Summary:     Updating the derived quantity docs to reflect that they are classes, not functions.
Affected #:  1 file

diff -r 8502446cd030d1d490e251541b9f28690fa8eaea -r 7d76c64806e9316176124bf8093555e4189095fa doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -130,7 +130,8 @@
     | Class :func:`~yt.data_objects.static_output.Dataset.all_data`
     | Usage: ``all_data(find_max=False)``
     | ``all_data()`` is a wrapper on the Box Region class which defaults to 
-      creating a Region covering the entire dataset domain.
+      creating a Region covering the entire dataset domain.  It is effectively 
+      ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
 
 **Box Region** 
     | Class :class:`~yt.data_objects.data_containers.YTRegionBase`
@@ -146,7 +147,7 @@
 **Disk/Cylinder** 
     | Class: :class:`~yt.data_objects.data_containers.YTDiskBase`
     | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None)``
-    | A cylinder defined by point at the center of one of the circular bases,
+    | A cylinder defined by a point at the center of one of the circular bases,
       a normal vector to it defining the orientation of the length of the
       cylinder, and radius and height values for the cylinder's dimensions.
 
@@ -244,83 +245,84 @@
 Processing Objects: Derived Quantities
 --------------------------------------
 
-Derived quantities are a way of operating on a collection of cells and
-returning a set of values that is fewer in number than the number of cells --
-yt already knows about several.  Every data object (see
-:ref:`data-objects`) provides a mechanism for access to derived quantities.
-These can be accessed via the ``quantities`` interface, like so:
+Derived quantities are a way of calculating some bulk quantities associated
+with all of the grid cells contained in a data object.  There are several 
+built in to ``yt``, but you can create your own custom derived quantities 
+as well.  Derived quantities can be accessed via the ``quantities`` interface.
+Here is an example of how to get the angular momentum vector calculated from 
+all the cells contained in a sphere at the center of our dataset.
 
 .. code-block:: python
 
    ds = yt.load("my_data")
-   dd = ds.all_data()
-   dd.quantities.angular_momentum_vector()
+   sp = ds.sphere('c', (10, 'kpc'))
+   print sp.quantities.angular_momentum_vector()
 
 Available Derived Quantities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 **Angular Momentum Vector**
-    | Function :func:`~yt.data_objects.derived_quantities.AngularMomentumVector`
+    | Class :class:`~yt.data_objects.derived_quantities.AngularMomentumVector`
     | Usage: ``angular_momentum_vector(use_gas=True, use_particles=True)``
-    | This function returns the mass-weighted average angular momentum vector.
+    | The mass-weighted average angular momentum vector of the particles, gas, 
+      or both.
 
 **Bulk Velocity**
-    | Function :func:`~yt.data_objects.derived_quantities.BulkVelocity`
+    | Class :class:`~yt.data_objects.derived_quantities.BulkVelocity`
     | Usage: ``bulk_velocity(use_gas=True, use_particles=True)``
-    | This function returns the mass-weighted average velocity in the object.
+    | The mass-weighted average velocity of the particles, gas, or both.
 
 **Center of Mass**
-    | Function :func:`~yt.data_objects.derived_quantities.CenterOfMass`
+    | Class :class:`~yt.data_objects.derived_quantities.CenterOfMass`
     | Usage: ``center_of_mass(use_cells=True, use_particles=False)``
-    | This function returns the location of the center
-      of mass. By default, it is computed from the *non-particle* data in the object.
+    | The location of the center of mass. By default, it is computed from 
+      the *non-particle* data in the object, but it can include 
+      particles, gas, or both.
 
 **Extrema**
-    | Function :func:`~yt.data_objects.derived_quantities.Extrema`
+    | Class :class:`~yt.data_objects.derived_quantities.Extrema`
     | Usage: ``extrema(fields, non_zero=False)``
-    | This function returns the extrema of a field or list of fields.
+    | The extrema of a field or list of fields.
 
 **Maximum Location**
-    | Function :func:`~yt.data_objects.derived_quantities.max_location`
+    | Class :class:`~yt.data_objects.derived_quantities.max_location`
     | Usage: ``max_location(fields)``
-    | This function returns the maximum of a field or list of fields as well
+    | The maximum of a field or list of fields as well
       as the x,y,z location of that maximum.
 
 **Minimum Location**
-    | Function :func:`~yt.data_objects.derived_quantities.min_location`
+    | Class :class:`~yt.data_objects.derived_quantities.min_location`
     | Usage: ``min_location(fields)``
-    | This function returns the minimum of a field or list of fields as well
+    | The minimum of a field or list of fields as well
       as the x,y,z location of that minimum.
 
 **Spin Parameter**
-    | Function :func:`~yt.data_objects.derived_quantities.SpinParameter`
+    | Class :class:`~yt.data_objects.derived_quantities.SpinParameter`
     | Usage: ``spin_parameter(use_gas=True, use_particles=True)``
-    | This function returns the spin parameter for the baryons, but it uses
-      the particles in calculating enclosed mass.
+    | The spin parameter for the baryons using the particles, gas, or both.
 
 **Total Mass**
-    | Function :func:`~yt.data_objects.derived_quantities.TotalMass`
+    | Class :class:`~yt.data_objects.derived_quantities.TotalMass`
     | Usage: ``total_mass()``
-    | This function takes no arguments and returns a tuple containing the sum of 
-      mesh-cell masses and particle masses in the object.
+    | The total mass of the object as a tuple of (total gas, total particle)
+      mass.
 
 **Total of a Field**
-    | Function :func:`~yt.data_objects.derived_quantities.TotalQuantity`
+    | Class :class:`~yt.data_objects.derived_quantities.TotalQuantity`
     | Usage: ``total_quantity(fields)``
-    | This function sums up a given field (or list of fields) over the entire 
-      region.
+    | The sum of a given field (or list of fields) over the entire object.
 
 **Weighted Average of a Field**
-    | Function :func:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`
+    | Class :class:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`
     | Usage: ``weighted_average_quantity(fields, weight)``
-    | This function returns a weighted average of a field (or list of fields)
+    | The weighted average of a field (or list of fields)
       over an entire data object.  If you want an unweighted average, 
       then set your weight to be the field: ``ones``.
 
 **Weighted Variance of a Field**
-    | Function :func:`~yt.data_objects.derived_quantities.WeightedVariance`
+    | Class :class:`~yt.data_objects.derived_quantities.WeightedVariance`
     | Usage: ``weighted_variance(fields, weight)``
-    | This function returns the weighted variance of a field (or list of fields)
+    | The weighted variance of a field (or list of fields)
       over an entire data object and the weighted mean.  
       If you want an unweighted variance, then 
       set your weight to be the field: ``ones``.
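
To make the keyword arguments in the listing above concrete, a short sketch
using the flags exactly as documented (dataset and field names are the samples
used earlier in this series):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sp = ds.sphere('c', (10, 'kpc'))
   # gas-only angular momentum; exclude the particles
   print sp.quantities.angular_momentum_vector(use_gas=True,
                                               use_particles=False)
   # opt in to particles for the center-of-mass calculation
   print sp.quantities.center_of_mass(use_cells=True, use_particles=True)
   # unweighted average: weight by the "ones" field, as noted above
   print sp.quantities.weighted_average_quantity("temperature", "ones")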


https://bitbucket.org/yt_analysis/yt/commits/07232b3d2814/
Changeset:   07232b3d2814
Branch:      yt-3.0
User:        chummels
Date:        2014-07-28 22:14:57
Summary:     Removing subsection on creating a custom derived quantity for your data from the object docs.  This functionality is currently broken in yt 3, and it needs to be fixed before we advertise it.
Affected #:  1 file

diff -r 7d76c64806e9316176124bf8093555e4189095fa -r 07232b3d2814f0a0b9db90568e2991db1c97127c doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -246,9 +246,8 @@
 --------------------------------------
 
 Derived quantities are a way of calculating some bulk quantities associated
-with all of the grid cells contained in a data object.  There are several 
-built in to ``yt``, but you can create your own custom derived quantities 
-as well.  Derived quantities can be accessed via the ``quantities`` interface.
+with all of the grid cells contained in a data object.  
+Derived quantities can be accessed via the ``quantities`` interface.
 Here is an example of how to get the angular momentum vector calculated from 
 all the cells contained in a sphere at the center of our dataset.
 
@@ -327,36 +326,6 @@
       If you want an unweighted variance, then 
       set your weight to be the field: ``ones``.
 
-Creating Custom Derived Quantities
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-The basic idea is that you need to be able to operate both on a set of data,
-and a set of sets of data.  (If this is not possible, the quantity needs to be
-added with the ``force_unlazy`` option.)
-
-Two functions are necessary.  One will operate on arrays of data, either fed
-from each grid individually or fed from the entire data object at once.  The
-second one takes the results of the first, either as lists of arrays or as
-single arrays, and returns the final values.  For an example, we look at the
-``TotalMass`` function:
-
-.. code-block:: python
-
-   def _TotalMass(data):
-       baryon_mass = data["cell_mass"].sum()
-       particle_mass = data["ParticleMassMsun"].sum()
-       return baryon_mass, particle_mass
-   def _combTotalMass(data, baryon_mass, particle_mass):
-       return baryon_mass.sum() + particle_mass.sum()
-   add_quantity("TotalMass", function=_TotalMass,
-                combine_function=_combTotalMass, n_ret = 2)
-
-Once the two functions have been defined, we then call :func:`add_quantity` to
-tell it the function that defines the data, the collator function, and the
-number of values that get passed between them.  In this case we return both the
-particle and the baryon mass, so we have two total values passed from the main
-function into the collator.
-
 .. _arbitrary-grid:
 
 Arbitrary Grid Objects for Particle Deposition


https://bitbucket.org/yt_analysis/yt/commits/139f728b2c2c/
Changeset:   139f728b2c2c
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 06:34:51
Summary:     Modifying the layout of the "analyze" docs page.
Affected #:  2 files

diff -r 07232b3d2814f0a0b9db90568e2991db1c97127c -r 139f728b2c2c872840030fad60a031e6907bff31 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -222,4 +222,3 @@
 a field for the smoothing length ``h``, which is roughly equivalent to 
 ``(m/\rho)^{1/3}``, where ``m`` and ``rho`` are the particle mass and density 
 respectively.  This can be useful for doing neighbour finding.
-

diff -r 07232b3d2814f0a0b9db90568e2991db1c97127c -r 139f728b2c2c872840030fad60a031e6907bff31 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -6,13 +6,13 @@
 .. toctree::
    :maxdepth: 2
 
+   fields
+   creating_derived_fields
    objects
    units/index
-   fields
-   creating_derived_fields
    filtering
    generating_processed_data
    time_series_analysis
+   parallel_computation
    external_analysis
-   parallel_computation
    analysis_modules/index


https://bitbucket.org/yt_analysis/yt/commits/e72891e134a2/
Changeset:   e72891e134a2
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 06:40:16
Summary:     Moving creating_derived_fields docs to dev docs.
Affected #:  4 files

diff -r 139f728b2c2c872840030fad60a031e6907bff31 -r e72891e134a280a27f71d1a661532c7183069df4 doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ /dev/null
@@ -1,209 +0,0 @@
-.. _creating-derived-fields:
-
-Creating Derived Fields
-=======================
-
-One of the more powerful means of extending ``yt`` is through the usage of derived
-fields.  These are fields that describe a value at each cell in a simulation.
-
-Defining a New Field
---------------------
-
-So once a new field has been conceived of, the best way to create it is to
-construct a function that performs an array operation -- operating on a 
-collection of data, neutral to its size, shape, and type. (All fields should
-be provided as 64-bit floats.)
-
-A simple example of this is the pressure field, which demonstrates the ease of
-this approach.
-
-.. code-block:: python
-
-   import yt
-
-   def _pressure(field, data):
-       return (data.ds.gamma - 1.0) * \
-              data["density"] * data["thermal_energy"]
-
-Note that we do a couple different things here.  We access the "gamma"
-parameter from the dataset, we access the "density" field and we access
-the "thermal_energy" field.  "thermal_energy" is, in fact, another derived field!
-("thermal_energy" deals with the distinction in storage of energy between dual
-energy formalism and non-DEF.)  We don't do any loops, we don't do any
-type-checking, we can simply multiply the three items together.
-
-Once we've defined our function, we need to notify ``yt`` that the field is
-available.  The :func:`add_field` function is the means of doing this; it has a
-number of fairly specific parameters that can be passed in, but here we'll only
-look at the most basic ones needed for a simple scalar baryon field.
-
-.. code-block:: python
-
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
-
-We feed it the name of the field, the name of the function, and the
-units.  Note that the units parameter is a "raw" string, in the format that ``yt`` uses
-in its `symbolic units implementation <units>`_ (e.g., employing only unit names, numbers,
-and mathematical operators in the string, and using ``"**"`` for exponentiation). We suggest
-that you name the function that creates a derived field with the intended field name prefixed
-by a single underscore, as in the ``_pressure`` example above.
-
-:func:`add_field` can be invoked in two other ways. The first is by the function
-decorator :func:`derived_field`. The following code is equivalent to the previous
-example:
-
-.. code-block:: python
-
-   from yt import derived_field
-
-   @derived_field(name="pressure", units="dyne/cm**2")
-   def _pressure(field, data):
-       return (data.ds.gamma - 1.0) * \
-              data["density"] * data["thermal_energy"]
-
-The :func:`derived_field` decorator takes the same arguments as :func:`add_field`,
-and is often a more convenient shorthand in cases where you want to quickly set up
-a new field.
-
-Defining derived fields in the above fashion must be done before a dataset is loaded,
-in order for the dataset to recognize it. If you want to set up a derived field after you
-have loaded a dataset, or if you only want to set up a derived field for a particular
-dataset, there is an :meth:`add_field` method that hangs off dataset objects. The calling
-syntax is the same:
-
-.. code-block:: python
-
-   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
-
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
-
-A More Complicated Example
---------------------------
-
-But what if we want to do something a bit more fancy?  Here's an example of getting
-parameters from the data object and using those to define the field;
-specifically, here we obtain the ``center`` and ``bulk_velocity`` parameters
-and use those to define a field for radial velocity (there is already a ``"radial_velocity"``
-field in ``yt``, but we create this one here just as a transparent and simple example).
-
-.. code-block:: python
-
-   from yt.fields.api import ValidateParameter
-   import numpy as np
-
-   def _my_radial_velocity(field, data):
-       if data.has_field_parameter("bulk_velocity"):
-           bv = data.get_field_parameter("bulk_velocity").in_units("cm/s")
-       else:
-           bv = data.ds.arr(np.zeros(3), "cm/s")
-       xv = data["gas","velocity_x"] - bv[0]
-       yv = data["gas","velocity_y"] - bv[1]
-       zv = data["gas","velocity_z"] - bv[2]
-       center = data.get_field_parameter('center')
-       x_hat = data["x"] - center[0]
-       y_hat = data["y"] - center[1]
-       z_hat = data["z"] - center[2]
-       r = np.sqrt(x_hat*x_hat+y_hat*y_hat+z_hat*z_hat)
-       x_hat /= r
-       y_hat /= r
-       z_hat /= r
-       return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
-                function=_my_radial_velocity,
-                units="cm/s",
-                take_log=False,
-                validators=[ValidateParameter('center'),
-                            ValidateParameter('bulk_velocity')])
-
-Note that we have added a few parameters below the main function; we specify
-that we do not wish to display this field as logged, that we require both
-``bulk_velocity`` and ``center`` to be present in a given data object we wish
-to calculate this for, and we say that it should not be displayed in a
-drop-down box of fields to display. This is done through the parameter
-*validators*, which accepts a list of :class:`FieldValidator` objects. These
-objects define the way in which the field is generated, and when it is able to
-be created. In this case, we mandate that parameters *center* and
-*bulk_velocity* are set before creating the field. These are set via
-:meth:`~yt.data_objects.data_containers.set_field_parameter`, which can 
-be called on any object that has fields:
-
-.. code-block:: python
-
-   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   sp = ds.sphere("max", (200.,"kpc"))
-   sp.set_field_parameter("bulk_velocity", yt.YTArray([-100.,200.,300.], "km/s"))
-
-In this case, we already know what the *center* of the sphere is, so we do not set it. Also,
-note that *center* and *bulk_velocity* need to be :class:`YTArray` objects with units.
-
-Other examples for creating derived fields can be found in the cookbook recipes
-:ref:`cookbook-simple-derived-fields` and :ref:`cookbook-complex-derived-fields`.
-
-.. _derived-field-options:
-
-Field Options
--------------
-
-The arguments to :func:`add_field` are passed on to the constructor of :class:`DerivedField`.
-There are a number of options available, but the only mandatory ones are ``name``,
-``units``, and ``function``.
-
-   ``name``
-     This is the name of the field -- how you refer to it.  For instance,
-     ``pressure`` or ``magnetic_field_strength``.
-   ``function``
-     This is a function handle that defines the field
-   ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
-   ``display_name``
-     This is a name used in the plots, for instance ``"Divergence of
-     Velocity"``.  If not supplied, the ``name`` value is used.
-   ``take_log``
-     This is *True* or *False* and describes whether the field should be logged
-     when plotted.
-   ``particle_type``
-     Is this field a *particle* field?
-   ``validators``
-     (*Advanced*) This is a list of :class:`FieldValidator` objects, for instance to mandate
-     spatial data.
-   ``display_field``
-     (*Advanced*) Should this field appear in the dropdown box in Reason?
-   ``not_in_all``
-     (*Advanced*) If this is *True*, the field may not be in all the grids.
-   ``output_units``
-     (*Advanced*) For fields that exist on disk, which we may want to convert to other
-     fields or that get aliased to themselves, we can specify a different
-     desired output unit than the unit found on disk.
-
-Units for Cosmological Datasets
--------------------------------
-
-``yt`` has additional capabilities to handle the comoving coordinate system used
-internally in cosmological simulations. Simulations that use comoving
-coordinates, all length units have three other counterparts correspoding to
-comoving units, scaled comoving units, and scaled proper units. In all cases
-'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
-unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.
-
-To access these different units, yt has a common naming system. Scaled units are denoted by
-dividing by the scaled Hubble parameter ``h`` (which is in itself a unit). Comoving
-units are denoted by appending ``cm`` to the end of the unit name.
-
-Using the parsec as an example,
-
-``pc``
-    Proper parsecs, :math:`\rm{pc}`.
-
-``pccm``
-    Comoving parsecs, :math:`\rm{pc}/(1+z)`.
-
-``pccm/h``
-    Comoving parsecs normalized by the scaled hubble constant, :math:`\rm{pc}/h/(1+z)`.
-
-``pc/h``
-    Proper parsecs, normalized by the scaled hubble constant, :math:`\rm{pc}/h`.
-
-Further examples of this functionality are shown in :ref:`comoving_units_and_code_units`.

diff -r 139f728b2c2c872840030fad60a031e6907bff31 -r e72891e134a280a27f71d1a661532c7183069df4 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -7,7 +7,6 @@
    :maxdepth: 2
 
    fields
-   creating_derived_fields
    objects
    units/index
    filtering

diff -r 139f728b2c2c872840030fad60a031e6907bff31 -r e72891e134a280a27f71d1a661532c7183069df4 doc/source/developing/creating_derived_fields.rst
--- /dev/null
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -0,0 +1,209 @@
+.. _creating-derived-fields:
+
+Creating Derived Fields
+=======================
+
+One of the more powerful means of extending ``yt`` is through the usage of derived
+fields.  These are fields that describe a value at each cell in a simulation.
+
+Defining a New Field
+--------------------
+
+So once a new field has been conceived of, the best way to create it is to
+construct a function that performs an array operation -- operating on a 
+collection of data, neutral to its size, shape, and type. (All fields should
+be provided as 64-bit floats.)
+
+A simple example of this is the pressure field, which demonstrates the ease of
+this approach.
+
+.. code-block:: python
+
+   import yt
+
+   def _pressure(field, data):
+       return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+
+Note that we do a couple different things here.  We access the "gamma"
+parameter from the dataset, we access the "density" field and we access
+the "thermal_energy" field.  "thermal_energy" is, in fact, another derived field!
+("thermal_energy" deals with the distinction in storage of energy between dual
+energy formalism and non-DEF.)  We don't do any loops, we don't do any
+type-checking, we can simply multiply the three items together.
+
+Once we've defined our function, we need to notify ``yt`` that the field is
+available.  The :func:`add_field` function is the means of doing this; it has a
+number of fairly specific parameters that can be passed in, but here we'll only
+look at the most basic ones needed for a simple scalar baryon field.
+
+.. code-block:: python
+
+   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
+
+We feed it the name of the field, the name of the function, and the
+units.  Note that the units parameter is a "raw" string, in the format that ``yt`` uses
+in its `symbolic units implementation <units>`_ (e.g., employing only unit names, numbers,
+and mathematical operators in the string, and using ``"**"`` for exponentiation). We suggest
+that you name the function that creates a derived field with the intended field name prefixed
+by a single underscore, as in the ``_pressure`` example above.
+
+:func:`add_field` can be invoked in two other ways. The first is by the function
+decorator :func:`derived_field`. The following code is equivalent to the previous
+example:
+
+.. code-block:: python
+
+   from yt import derived_field
+
+   @derived_field(name="pressure", units="dyne/cm**2")
+   def _pressure(field, data):
+       return (data.ds.gamma - 1.0) * \
+              data["density"] * data["thermal_energy"]
+
+The :func:`derived_field` decorator takes the same arguments as :func:`add_field`,
+and is often a more convenient shorthand in cases where you want to quickly set up
+a new field.
+
+Defining derived fields in the above fashion must be done before a dataset is loaded,
+in order for the dataset to recognize them. If you want to set up a derived field after you
+have loaded a dataset, or if you only want to set up a derived field for a particular
+dataset, there is an :meth:`add_field` method that hangs off dataset objects. The calling
+syntax is the same:
+
+.. code-block:: python
+
+   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
+   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
+
+If you find yourself using the same custom-defined fields over and over, you
+should put them in your plugins file as described in :ref:`plugin-file`.
+
+A More Complicated Example
+--------------------------
+
+But what if we want to do something a bit more fancy?  Here's an example of getting
+parameters from the data object and using those to define the field;
+specifically, here we obtain the ``center`` and ``bulk_velocity`` parameters
+and use those to define a field for radial velocity (there is already a ``"radial_velocity"``
+field in ``yt``, but we create this one here just as a transparent and simple example).
+
+.. code-block:: python
+
+   from yt.fields.api import ValidateParameter
+   import numpy as np
+
+   def _my_radial_velocity(field, data):
+       if data.has_field_parameter("bulk_velocity"):
+           bv = data.get_field_parameter("bulk_velocity").in_units("cm/s")
+       else:
+           bv = data.ds.arr(np.zeros(3), "cm/s")
+       xv = data["gas","velocity_x"] - bv[0]
+       yv = data["gas","velocity_y"] - bv[1]
+       zv = data["gas","velocity_z"] - bv[2]
+       center = data.get_field_parameter('center')
+       x_hat = data["x"] - center[0]
+       y_hat = data["y"] - center[1]
+       z_hat = data["z"] - center[2]
+       r = np.sqrt(x_hat*x_hat+y_hat*y_hat+z_hat*z_hat)
+       x_hat /= r
+       y_hat /= r
+       z_hat /= r
+       return xv*x_hat + yv*y_hat + zv*z_hat
+   yt.add_field("my_radial_velocity",
+                function=_my_radial_velocity,
+                units="cm/s",
+                take_log=False,
+                validators=[ValidateParameter('center'),
+                            ValidateParameter('bulk_velocity')])
+
+Note that we have added a few parameters below the main function; we specify
+that we do not wish to display this field as logged, and we require both
+``bulk_velocity`` and ``center`` to be present in a given data object we wish
+to calculate this for. The latter requirement is expressed through the parameter
+*validators*, which accepts a list of :class:`FieldValidator` objects. These
+objects define the way in which the field is generated, and when it is able to
+be created. In this case, we mandate that parameters *center* and
+*bulk_velocity* are set before creating the field. These are set via
+:meth:`~yt.data_objects.data_containers.set_field_parameter`, which can 
+be called on any object that has fields:
+
+.. code-block:: python
+
+   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
+   sp = ds.sphere("max", (200.,"kpc"))
+   sp.set_field_parameter("bulk_velocity", yt.YTArray([-100.,200.,300.], "km/s"))
+
+In this case, we already know what the *center* of the sphere is, so we do not set it. Also,
+note that *center* and *bulk_velocity* need to be :class:`YTArray` objects with units.
+
+Other examples for creating derived fields can be found in the cookbook recipes
+:ref:`cookbook-simple-derived-fields` and :ref:`cookbook-complex-derived-fields`.
+
+.. _derived-field-options:
+
+Field Options
+-------------
+
+The arguments to :func:`add_field` are passed on to the constructor of :class:`DerivedField`.
+There are a number of options available, but the only mandatory ones are ``name``,
+``units``, and ``function``.
+
+   ``name``
+     This is the name of the field -- how you refer to it.  For instance,
+     ``pressure`` or ``magnetic_field_strength``.
+   ``function``
+     This is a function handle that defines the field
+   ``units``
+     This is a string that describes the units. Powers must be in
+     Python syntax (``**`` instead of ``^``).
+   ``display_name``
+     This is a name used in the plots, for instance ``"Divergence of
+     Velocity"``.  If not supplied, the ``name`` value is used.
+   ``take_log``
+     This is *True* or *False* and describes whether the field should be logged
+     when plotted.
+   ``particle_type``
+     Is this field a *particle* field?
+   ``validators``
+     (*Advanced*) This is a list of :class:`FieldValidator` objects, for instance to mandate
+     spatial data.
+   ``display_field``
+     (*Advanced*) Should this field appear in the dropdown box in Reason?
+   ``not_in_all``
+     (*Advanced*) If this is *True*, the field may not be in all the grids.
+   ``output_units``
+     (*Advanced*) For fields that exist on disk, which we may want to convert to other
+     fields or that get aliased to themselves, we can specify a different
+     desired output unit than the unit found on disk.
+
+Units for Cosmological Datasets
+-------------------------------
+
+``yt`` has additional capabilities to handle the comoving coordinate system used
+internally in cosmological simulations. In simulations that use comoving
+coordinates, all length units have three other counterparts corresponding to
+comoving units, scaled comoving units, and scaled proper units. In all cases
+'scaled' units refer to scaling by the reduced Hubble parameter - i.e. the length
+unit is what it would be in a universe where Hubble's parameter is 100 km/s/Mpc.
+
+To access these different units, yt has a common naming system. Scaled units are denoted by
+dividing by the scaled Hubble parameter ``h`` (which is in itself a unit). Comoving
+units are denoted by appending ``cm`` to the end of the unit name.
+
+Using the parsec as an example,
+
+``pc``
+    Proper parsecs, :math:`\rm{pc}`.
+
+``pccm``
+    Comoving parsecs, :math:`\rm{pc}/(1+z)`.
+
+``pccm/h``
+    Comoving parsecs normalized by the scaled hubble constant, :math:`\rm{pc}/h/(1+z)`.
+
+``pc/h``
+    Proper parsecs, normalized by the scaled hubble constant, :math:`\rm{pc}/h`.
+
+Further examples of this functionality are shown in :ref:`comoving_units_and_code_units`.

diff -r 139f728b2c2c872840030fad60a031e6907bff31 -r e72891e134a280a27f71d1a661532c7183069df4 doc/source/developing/index.rst
--- a/doc/source/developing/index.rst
+++ b/doc/source/developing/index.rst
@@ -21,6 +21,7 @@
    testing
    debugdrive
    creating_datatypes
+   creating_derived_fields
    creating_derived_quantities
    creating_frontend
    building_the_docs
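
Since the comoving-unit notation is easy to get backwards, here is a small
sketch of the four parsec variants listed above (the dataset path is a
placeholder for any cosmological output; ``in_units`` is the standard
conversion method):

.. code-block:: python

   import yt

   ds = yt.load("Enzo_64/DD0043/data0043")  # placeholder cosmological dataset
   dd = ds.all_data()
   dx = dd['dx']
   print dx.in_units('pc')      # proper parsecs
   print dx.in_units('pccm')    # comoving parsecs, pc/(1+z)
   print dx.in_units('pccm/h')  # comoving parsecs scaled by h
   print dx.in_units('pc/h')    # proper parsecs scaled by h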


https://bitbucket.org/yt_analysis/yt/commits/f3b22fea6999/
Changeset:   f3b22fea6999
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 06:44:56
Summary:     Adding reference for creating new data object types.
Affected #:  2 files

diff -r e72891e134a280a27f71d1a661532c7183069df4 -r f3b22fea69992d94b4f008c12a2335a7c5153dc5 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -77,6 +77,9 @@
 * *Construction Objects* - Objects represent some sort of data product 
 constructed by additional analysis
 
+If you want to create your own custom data object type, see 
+:ref:`creating-objects`.
+
 Geometric Objects
 ^^^^^^^^^^^^^^^^^
 

diff -r e72891e134a280a27f71d1a661532c7183069df4 -r f3b22fea69992d94b4f008c12a2335a7c5153dc5 doc/source/developing/creating_datatypes.rst
--- a/doc/source/developing/creating_datatypes.rst
+++ b/doc/source/developing/creating_datatypes.rst
@@ -1,6 +1,6 @@
 .. _creating-objects:
 
-Creating 3D Datatypes
+Creating Data Objects
 =====================
 
 The three-dimensional datatypes in yt follow a fairly simple protocol.  The


https://bitbucket.org/yt_analysis/yt/commits/246adc798849/
Changeset:   246adc798849
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 06:57:01
Summary:     Consolidating the two separate discussions of "surfaces"
Affected #:  2 files

diff -r f3b22fea69992d94b4f008c12a2335a7c5153dc5 -r 246adc7988497ed73a3282504f0617c52da2b196 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -428,30 +428,6 @@
 contains :class:`~yt.data_objects.data_containers.AMRExtractedRegionBase`
 objects.  These can be queried just as any other data object.
 
-.. _extracting-isocontour-information:
-
-Surface Objects and Extracting Isocontour Information
------------------------------------------------------
-
-``yt`` contains an implementation of the `Marching Cubes
-<http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm, which can operate on
-3D data objects.  This provides two things.  The first is to identify
-isocontours and return either the geometry of those isocontours or to return
-another field value sampled along that isocontour.  The second piece of
-functionality is to calculate the flux of a field over an isocontour.
-
-Note that these isocontours are not guaranteed to be topologically connected.
-In fact, inside a given data object, the marching cubes algorithm will return
-all isocontours, not just a single connected one.  This means if you encompass
-two clumps of a given density in your data object and extract an isocontour at
-that density, it will include both of the clumps.
-
-To extract geometry or sample a field, call
-:meth:`~yt.data_objects.data_containers.AMR3DData.extract_isocontours`.  To
-calculate a flux, call
-:meth:`~yt.data_objects.data_containers.AMR3DData.calculate_isocontour_flux`.
-both of these operations will run in parallel.
-
 .. _object-serialization:
 
 Storing and Loading Objects

diff -r f3b22fea69992d94b4f008c12a2335a7c5153dc5 -r 246adc7988497ed73a3282504f0617c52da2b196 doc/source/visualizing/sketchfab.rst
--- a/doc/source/visualizing/sketchfab.rst
+++ b/doc/source/visualizing/sketchfab.rst
@@ -1,32 +1,52 @@
+.. _extracting-isocontour-information:
+.. _surfaces:
+
 3D Surfaces and Sketchfab
 =========================
 
 .. sectionauthor:: Jill Naiman and Matthew Turk
 
-Surfaces
---------
+Surface Objects and Extracting Isocontour Information
+-----------------------------------------------------
 
-For a while now, yt has had the ability to extract isosurfaces from volumetric
-data using a `marching cubes <http://en.wikipedia.org/wiki/Marching_cubes>`_
-algorithm.  The surfaces could be exported in `OBJ format
-<http://en.wikipedia.org/wiki/Wavefront_.obj_file>`_, values could be samples
-at the center of each face of the surface, and flux of a given field could be
-calculated over the surface.  This means you could, for instance, extract an
-isocontour in density and calculate the mass flux over that isocontour.  It
-also means you could export a surface from yt and view it in something like
-`Blender <http://www.blender.org/>`_, `MeshLab
-<http://meshlab.sourceforge.net/>`_, or even on your Android or iOS device in
-`MeshPad <http://www.meshpad.org/>`_ or `MeshLab Android
-<https://play.google.com/store/apps/details?id=it.isticnr.meshlab&hl=en>`_.
-One important caveat with marching cubes is that with adaptive mesh refinement
+``yt`` contains an implementation of the `Marching Cubes
+<http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm, which can operate on
+3D data objects.  This provides two things.  The first is to identify
+isocontours and return either the geometry of those isocontours or to return
+another field value sampled along that isocontour.  The second piece of
+functionality is to calculate the flux of a field over an isocontour.
+
+Note that these isocontours are not guaranteed to be topologically connected.
+In fact, inside a given data object, the marching cubes algorithm will return
+all isocontours, not just a single connected one.  This means if you encompass
+two clumps of a given density in your data object and extract an isocontour at
+that density, it will include both of the clumps.
+
+One important caveat is that with adaptive mesh refinement
 data, you *will* see cracks across refinement boundaries unless a
 "crack-fixing" step is applied to match up these boundaries.  yt does not
 perform such an operation, and so there will be seams visible in 3D views of
 your isosurfaces.
 
-The methods to do so were methods on data objects -- ``extract_isocontours``,
-``calculate_isocontour_flux`` -- which returned just numbers or values.
-However, recently, I've created a new object called ``AMRSurface`` that makes
+
+Surfaces can be exported in `OBJ format
+<http://en.wikipedia.org/wiki/Wavefront_.obj_file>`_, values can be sampled
+at the center of each face of the surface, and the flux of a given field can be
+calculated over the surface.  This means you can, for instance, extract an
+isocontour in density and calculate the mass flux over that isocontour.  It
+also means you can export a surface from yt and view it in something like
+`Blender <http://www.blender.org/>`_, `MeshLab
+<http://meshlab.sourceforge.net/>`_, or even on your Android or iOS device in
+`MeshPad <http://www.meshpad.org/>`_ or `MeshLab Android
+<https://play.google.com/store/apps/details?id=it.isticnr.meshlab&hl=en>`_.
+
+To extract geometry or sample a field, call
+:meth:`~yt.data_objects.data_containers.AMR3DData.extract_isocontours`.  To
+calculate a flux, call
+:meth:`~yt.data_objects.data_containers.AMR3DData.calculate_isocontour_flux`.
+Both of these operations will run in parallel.
+
+Alternatively, you can use an object called ``YTSurfaceBase``, which makes
 this process much easier.  You can create one of these objects by specifying a
 source data object and a field over which to identify a surface at a given
 value.  For example:
@@ -38,7 +58,7 @@
    sphere = ds.sphere("max", (1.0, "Mpc"))
    surface = ds.surface(sphere, "density", 1e-27)
 
-This object, ``surface``, can now be queried for values on the surface.  For
+This object, ``surface``, can be queried for values on the surface.  For
 instance:
 
 .. code-block:: python
@@ -80,7 +100,7 @@
 discuss morphological properties of a dataset with collaborators.  It's also
 just plain cool.
 
-The ``AMRSurface`` object includes a method to upload directly to Sketchfab,
+The ``YTSurfaceBase`` object includes a method to upload directly to Sketchfab,
 but it requires that you get an API key first.  You can get this API key by
 creating an account and then going to your "dashboard," where it will be listed
 on the right hand side.  Once you've obtained it, put it into your
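
Putting the surface machinery above together end to end, a hedged sketch (the
dataset, isocontour value, and fluxing field are placeholders, and the
``calculate_flux`` call is an assumption based on the mass-flux example
described above):

.. code-block:: python

   import yt

   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
   sphere = ds.sphere("max", (1.0, "Mpc"))
   surface = ds.surface(sphere, "density", 1e-27)
   # sample another field on the isodensity surface
   print surface["temperature"].min(), surface["temperature"].max()
   # mass flux of gas through the isocontour
   flux = surface.calculate_flux("velocity_x", "velocity_y",
                                 "velocity_z", "density")
   print flux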


https://bitbucket.org/yt_analysis/yt/commits/08f553c5c710/
Changeset:   08f553c5c710
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 07:20:02
Summary:     Moving data_selection_and_fields cookbook to fields_and_unit_conversion, since it is more accurate.  it also makes it easier to find the unit conversion docs, which currently are buried.
Affected #:  5 files

diff -r 246adc7988497ed73a3282504f0617c52da2b196 -r 08f553c5c710b7a28520faa0383776d273ea2cd2 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ /dev/null
@@ -1,462 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the past, querying a data object with a field name returned a NumPy `ndarray` . In the new unit system, data object queries will return a `YTArray`, a subclass of `ndarray` that preserves all of the nice properties of `ndarray`, including broadcasting, deep and shallow copies, and views. "
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Selecting data from an object"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`YTArray` is 'unit-aware'.  Let's show how this works in practice using a sample Enzo dataset:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
-      "          \n",
-      "dd = ds.all_data()\n",
-      "maxval, maxloc = ds.find_max('density')\n",
-      "\n",
-      "dens = dd['density']"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print maxval"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "mass = dd['cell_mass']\n",
-      "\n",
-      "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
-      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
-      "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
-      "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "dx = dd['dx']\n",
-      "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
-      "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
-      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
-      "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Unit conversions"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
-      "\n",
-      "* `in_units`\n",
-      "* `in_cgs`\n",
-      "* `in_mks`\n",
-      "* `convert_to_units`\n",
-      "* `convert_to_cgs`\n",
-      "* `convert_to_mks`"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The first method, `in_units`, returns a copy of the array in the units denoted by a string argument:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['density'].in_units('Msun/pc**3')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (dd['pressure'])\n",
-      "print (dd['pressure']).in_cgs()\n",
-      "print (dd['pressure']).in_mks()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The next two methods do in-place conversions:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "dens = dd['density']\n",
-      "print dens\n",
-      "\n",
-      "dens.convert_to_units('Msun/pc**3')\n",
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One possibly confusing wrinkle when using in-place conversions is that if you query `dd['density']` again, you'll find that it has been converted to solar masses per cubic parsec:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['density']\n",
-      "\n",
-      "dens.convert_to_units('g/cm**3')\n",
-      "\n",
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n",
-      "\n",
-      "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy.  This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['cell_mass']\n",
-      "print dd['cell_volume'].in_units('cm**3')\n",
-      "\n",
-      "print (dd['cell_mass']/dd['cell_volume']).in_cgs()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Working with views and converting to ndarray"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are two ways to convert the data into a numpy array.  The most straightforward and safe way to do this is to create a copy of the array data.  The following cell demonstrates four equivalent ways of doing this, in increasing degree of terseness."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import numpy as np\n",
-      "\n",
-      "dens = dd['cell_mass']\n",
-      "\n",
-      "print dens.to_ndarray()\n",
-      "print np.array(dens)\n",
-      "print dens.value\n",
-      "print dens.v"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since we have a copy of the data, we can mess with it however we wish without disturbing the original data returned by the yt data object."
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Another way to touch the raw array data is to get a _view_.  A numpy view is a lightweight array interface to a memory buffer. There are four ways to create views of YTArray instances:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['cell_mass'].ndarray_view()\n",
-      "print dd['cell_mass'].view(np.ndarray)\n",
-      "print dd['cell_mass'].ndview\n",
-      "print dd['cell_mass'].d"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful when it is more straightforward to treat the array as if it did not have units, without paying the cost of copying the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "density_values = dd['density'].d\n",
-      "density_values[0:10] = 0\n",
-      "\n",
-      "# The original array was updated\n",
-      "print dd['density']"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Some examples of converting from AstroPy units to yt:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from astropy import units as u\n",
-      "from yt import YTQuantity, YTArray\n",
-      "\n",
-      "x = 42.0 * u.meter\n",
-      "y = YTQuantity.from_astropy(x) "
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print x, type(x)\n",
-      "print y, type(y)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray.from_astropy(a)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print a, type(a)\n",
-      "print b, type(b)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "temp = dd[\"temperature\"]\n",
-      "atemp = temp.to_astropy()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print temp, type(temp)\n",
-      "print atemp, type(atemp)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "and quantities:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.utilities.physical_constants import kboltz\n",
-      "kb = kboltz.to_astropy()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print kboltz, type(kboltz)\n",
-      "print kb, type(kb)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "As a sanity check, you can show that it works round-trip:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity.from_astropy(kb)\n",
-      "print k1 == k2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "c = YTArray.from_astropy(a)\n",
-      "d = c.to_astropy()\n",
-      "print a == d"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file
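
For reference, the automatic unit algebra described in this notebook also
works on standalone quantities, independent of any dataset. A minimal sketch
(the numerical values are illustrative):

    from yt import YTQuantity

    # sympy carries the unit algebra through the arithmetic
    force = YTQuantity(3.0, 'kg') * YTQuantity(2.0, 'm/s**2')
    print force           # 6.0 kg*m/s**2
    print force.in_cgs()  # 600000.0 g*cm/s**2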

diff -r 246adc7988497ed73a3282504f0617c52da2b196 -r 08f553c5c710b7a28520faa0383776d273ea2cd2 doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
--- /dev/null
+++ b/doc/source/analyzing/units/2)_Fields_and_unit_conversion.ipynb
@@ -0,0 +1,462 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the past, querying a data object with a field name returned a NumPy `ndarray`. In the new unit system, data object queries will return a `YTArray`, a subclass of `ndarray` that preserves all of the nice properties of `ndarray`, including broadcasting, deep and shallow copies, and views."
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Selecting data from an object"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`YTArray` is 'unit-aware'.  Let's show how this works in practice using a sample Enzo dataset:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
+      "          \n",
+      "dd = ds.all_data()\n",
+      "maxval, maxloc = ds.find_max('density')\n",
+      "\n",
+      "dens = dd['density']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print maxval"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "mass = dd['cell_mass']\n",
+      "\n",
+      "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
+      "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
+      "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dx = dd['dx']\n",
+      "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
+      "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
+      "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Unit conversions"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
+      "\n",
+      "* `in_units`\n",
+      "* `in_cgs`\n",
+      "* `in_mks`\n",
+      "* `convert_to_units`\n",
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first method, `in_units`, returns a copy of the array in the units denoted by a string argument:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd['density'].in_units('Msun/pc**3')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "`in_cgs` and `in_mks` return a copy of the array converted to CGS and MKS units, respectively:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The next two methods do in-place conversions:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "dens = dd['density']\n",
+      "print dens\n",
+      "\n",
+      "dens.convert_to_units('Msun/pc**3')\n",
+      "print dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "One possibly confusing wrinkle when using in-place conversions is that if you query `dd['density']` again, you'll find that it has been converted to solar masses per cubic parsec:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd['density']\n",
+      "\n",
+      "dens.convert_to_units('g/cm**3')\n",
+      "\n",
+      "print dens"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n",
+      "\n",
+      "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy.  This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd['cell_mass']\n",
+      "print dd['cell_volume'].in_units('cm**3')\n",
+      "\n",
+      "print (dd['cell_mass']/dd['cell_volume']).in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Working with views and converting to ndarray"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "There are two ways to convert the data into a numpy array.  The most straightforward and safe way to do this is to create a copy of the array data.  The following cell demonstrates four equivalent ways of doing this, in increasing degree of terseness."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import numpy as np\n",
+      "\n",
+      "dens = dd['cell_mass']\n",
+      "\n",
+      "print dens.to_ndarray()\n",
+      "print np.array(dens)\n",
+      "print dens.value\n",
+      "print dens.v"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since we have a copy of the data, we can mess with it however we wish without disturbing the original data returned by the yt data object."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Another way to touch the raw array data is to get a _view_.  A numpy view is a lightweight array interface to a memory buffer. There are four ways to create views of YTArray instances:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print dd['cell_mass'].ndarray_view()\n",
+      "print dd['cell_mass'].view(np.ndarray)\n",
+      "print dd['cell_mass'].ndview\n",
+      "print dd['cell_mass'].d"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When working with views, remember that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful when it is more straightforward to treat the array as if it did not have units, without paying the cost of copying the data."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "density_values = dd['density'].d\n",
+      "density_values[0:10] = 0\n",
+      "\n",
+      "# The original array was updated\n",
+      "print dd['density']"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Round-Trip Conversions to and from AstroPy's Units System"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some examples of converting from AstroPy units to yt:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from astropy import units as u\n",
+      "from yt import YTQuantity, YTArray\n",
+      "\n",
+      "x = 42.0 * u.meter\n",
+      "y = YTQuantity.from_astropy(x) "
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print x, type(x)\n",
+      "print y, type(y)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = np.random.random(size=10) * u.km/u.s\n",
+      "b = YTArray.from_astropy(a)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print a, type(a)\n",
+      "print b, type(b)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "temp = dd[\"temperature\"]\n",
+      "atemp = temp.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print atemp, type(atemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import kboltz\n",
+      "kb = kboltz.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print kboltz, type(kboltz)\n",
+      "print kb, type(kb)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As a sanity check, you can show that it works round-trip:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "k1 = kboltz.to_astropy()\n",
+      "k2 = YTQuantity.from_astropy(kb)\n",
+      "print k1 == k2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "c = YTArray.from_astropy(a)\n",
+      "d = c.to_astropy()\n",
+      "print a == d"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
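
To underline the copy-versus-view distinction drawn in the notebook: mutating
a copy leaves the original YTArray untouched, while mutating a view writes
straight through to it. A minimal sketch, reusing the `dd` object from the
cells above:

    copied = dd['density'].to_ndarray()   # independent copy
    viewed = dd['density'].d              # view onto the same buffer

    copied[0] = -1.0   # no effect on dd['density']
    viewed[0] = 0.0    # changes dd['density'][0] as well

    print dd['density'][0]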

diff -r 246adc7988497ed73a3282504f0617c52da2b196 -r 08f553c5c710b7a28520faa0383776d273ea2cd2 doc/source/analyzing/units/data_selection_and_fields.rst
--- a/doc/source/analyzing/units/data_selection_and_fields.rst
+++ /dev/null
@@ -1,74 +0,0 @@
-.. _data_selection_and_fields:
-
-Data selection and fields
-=========================
-
-.. notebook:: 2)_Data_Selection_and_fields.ipynb
-
-Derived Fields
---------------
-
-.. This needs to be added outside the notebook since user-defined derived fields
-   require a 'fresh' kernel.
-
-The following example creates a derived field for the square root of the cell
-volume.
-
-.. notebook-cell::
-
-   import yt
-   import numpy as np
-
-   # Function defining the derived field
-   def root_cell_volume(field, data):
-      return np.sqrt(data['cell_volume'])
-
-   # Load the dataset
-   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
-
-   # Add the field to the dataset, linking to the derived field function and 
-   # units of the field
-   ds.add_field(("gas", "root_cell_volume"), units="cm**(3/2)", function=root_cell_volume)
-
-   # Access the derived field like any other field
-   ad = ds.all_data()
-   ad['root_cell_volume']
-
-No special unit logic needs to happen inside the function - `np.sqrt` will
-convert the units of the `cell_volume` field appropriately:
-
-.. notebook-cell::
-   :skip_exceptions:
-
-   import yt
-   import numpy as np
-
-   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
-   ad = ds.all_data()
-
-   print ad['cell_volume'].in_cgs()
-   print np.sqrt(ad['cell_volume'].in_cgs())
-
-That said, it is necessary to specify the units in the call to the
-:code:`add_field` function.  Not only does this ensure the returned units
-will be exactly what you expect, it also allows an in-place unit conversion
-in case the function returns data in different but dimensionally equivalent
-units.
-
-For example, let's redo the above example but ask for units of
-:code:`Mpc**(3/2)`:
-
-.. notebook-cell::
-
-   import yt
-   import numpy as np
-
-   def root_cell_volume(field, data):
-      return np.sqrt(data['cell_volume'])
-
-   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
-
-   # Here we set the default units to Mpc^(3/2)
-   ds.add_field(("gas", "root_cell_volume"), units="Mpc**(3/2)", function=root_cell_volume)
-
-   ad = ds.all_data()
-   ad['root_cell_volume']

diff -r 246adc7988497ed73a3282504f0617c52da2b196 -r 08f553c5c710b7a28520faa0383776d273ea2cd2 doc/source/analyzing/units/fields_and_unit_conversion.rst
--- /dev/null
+++ b/doc/source/analyzing/units/fields_and_unit_conversion.rst
@@ -0,0 +1,74 @@
+.. _data_selection_and_fields:
+
+Data selection and fields
+=========================
+
+.. notebook:: 2)_Data_Selection_and_fields.ipynb
+
+Derived Fields
+--------------
+
+.. This needs to be added outside the notebook since user-defined derived fields
+   require a 'fresh' kernel.
+
+The following example creates a derived field for the square root of the cell
+volume.
+
+.. notebook-cell::
+
+   import yt
+   import numpy as np
+
+   # Function defining the derived field
+   def root_cell_volume(field, data):
+      return np.sqrt(data['cell_volume'])
+
+   # Load the dataset
+   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
+
+   # Add the field to the dataset, linking to the derived field function and 
+   # units of the field
+   ds.add_field(("gas", "root_cell_volume"), units="cm**(3/2)", function=root_cell_volume)
+
+   # Access the derived field like any other field
+   ad = ds.all_data()
+   ad['root_cell_volume']
+
+No special unit logic needs to happen inside the function - `np.sqrt` will
+convert the units of the `cell_volume` field appropriately:
+
+.. notebook-cell::
+   :skip_exceptions:
+
+   import yt
+   import numpy as np
+
+   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
+   ad = ds.all_data()
+
+   print ad['cell_volume'].in_cgs()
+   print np.sqrt(ad['cell_volume'].in_cgs())
+
+That said, it is necessary to specify the units in the call to the
+:code:`add_field` function.  Not only does this ensure the returned units
+will be exactly what you expect, it also allows an in-place unit conversion
+in case the function returns data in different but dimensionally equivalent
+units.
+
+For example, let's redo the above example but ask for units of
+:code:`Mpc**(3/2)`:
+
+.. notebook-cell::
+
+   import yt
+   import numpy as np
+
+   def root_cell_volume(field, data):
+      return np.sqrt(data['cell_volume'])
+
+   ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
+
+   # Here we set the default units to Mpc^(3/2)
+   ds.add_field(("gas", "root_cell_volume"), units="Mpc**(3/2)", function=root_cell_volume)
+
+   ad = ds.all_data()
+   ad['root_cell_volume']
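
As a quick check that the requested units were honored, one can inspect the
`units` attribute on the returned array (a sketch, assuming the cells above
have been run):

    print ad['root_cell_volume'].units                   # Mpc**(3/2)
    print ad['root_cell_volume'].in_units('cm**(3/2)')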

diff -r 246adc7988497ed73a3282504f0617c52da2b196 -r 08f553c5c710b7a28520faa0383776d273ea2cd2 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -29,7 +29,7 @@
    :maxdepth: 1
 
    symbolic_units
-   data_selection_and_fields
+   fields_and_unit_conversion:
    comoving_units_and_code_units
    comparing_units_from_different_datasets
    units_and_plotting


https://bitbucket.org/yt_analysis/yt/commits/c7c9246e835d/
Changeset:   c7c9246e835d
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 07:50:44
Summary:     Minor corrections.
Affected #:  2 files

diff -r 08f553c5c710b7a28520faa0383776d273ea2cd2 -r c7c9246e835d4ce4e190ce54c9c54857e9b59ef7 doc/source/analyzing/units/index.rst
--- a/doc/source/analyzing/units/index.rst
+++ b/doc/source/analyzing/units/index.rst
@@ -29,7 +29,7 @@
    :maxdepth: 1
 
    symbolic_units
-   fields_and_unit_conversion:
+   fields_and_unit_conversion
    comoving_units_and_code_units
    comparing_units_from_different_datasets
    units_and_plotting

diff -r 08f553c5c710b7a28520faa0383776d273ea2cd2 -r c7c9246e835d4ce4e190ce54c9c54857e9b59ef7 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -342,9 +342,9 @@
         variable-resolution 2D object and transforms it into an NxM bitmap that
         can be plotted, examined or processed.  This is a convenience function
         to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other 2DData objects, this does
-        not accept a 'center' parameter as it is assumed to be centered at the
-        center of the cutting plane.
+        corresponding to_frb function for other YTSelectionContainer2D objects, 
+        this does not accept a 'center' parameter as it is assumed to be 
+        centered at the center of the cutting plane.
 
         Parameters
         ----------
@@ -463,9 +463,9 @@
         variable-resolution 2D object and transforms it into an NxM bitmap that
         can be plotted, examined or processed.  This is a convenience function
         to return an FRB directly from an existing 2D data object.  Unlike the
-        corresponding to_frb function for other 2DData objects, this does
-        not accept a 'center' parameter as it is assumed to be centered at the
-        center of the cutting plane.
+        corresponding to_frb function for other YTSelectionContainer2D objects, 
+        this does not accept a 'center' parameter as it is assumed to be 
+        centered at the center of the cutting plane.
 
         Parameters
         ----------
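
For context, a minimal usage sketch of `to_frb` on a cutting plane (the
sample dataset name is illustrative):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')

    # an oblique slice through the domain center
    cut = ds.cutting([0.1, 0.2, -0.9], ds.domain_center)

    # note: no 'center' argument -- the FRB is centered on the cutting plane
    frb = cut.to_frb((1.0, 'Mpc'), 512)
    print frb['density']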


https://bitbucket.org/yt_analysis/yt/commits/3b00b630e4ae/
Changeset:   3b00b630e4ae
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 20:41:05
Summary:     Updating field description as per Matt's comments.
Affected #:  1 file

diff -r c7c9246e835d4ce4e190ce54c9c54857e9b59ef7 -r 3b00b630e4aec359feb01ae8583ec53e4d7dd232 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -169,11 +169,10 @@
 
 .. include reference here once it's done
 
-The full list of fields that are available for a dataset can be found as a 
+The full list of fields available for a dataset can be found as 
 the attribute ``field_list`` for native, on-disk fields and ``derived_field_list``
-which is a superset of ``field_list`` and includes all derived fields that can
-be calculated for a given dataset.  You can see these by examining the 
-two properties:
+for derived fields (``derived_field_list`` is a superset of ``field_list``).
+You can view these lists by examining a dataset like this:
 
 .. code-block:: python
 
@@ -181,25 +180,15 @@
    print ds.field_list
    print ds.derived_field_list
 
-When a field is added, it is added to a container that hangs off of the
-dataset, as well.  All of the field creation options
-(:ref:`derived-field-options`) are accessible through this object.  One
-can get the default units of a field by first indexing and then using the
-``get_units()`` function on the full field name (e.g. ("gas", "pressure")):
+By indexing the ``field_info`` container, one can access information about a
+given field, such as its default units or its source code.
 
 .. code-block:: python
 
    ds = yt.load("my_data")
    ds.index
    print ds.field_info["gas", "pressure"].get_units()
-
-This is a fast way to examine the units of a given field, and additionally you
-can use :meth:`yt.utilities.pydot.get_source` to get the source code for a field:
-
-.. code-block:: python
-
-   field = ds.field_info["gas", "pressure"]
-   print field.get_source()
+   print ds.field_info["gas", "pressure"].get_source()
 
 Particle Fields
 ---------------
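
A short sketch of browsing these in bulk (assuming a loaded dataset ``ds``;
field names vary by frontend):

    ds.index  # make sure field information is populated
    for field in ds.derived_field_list:
        print field, ds.field_info[field].get_units()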


https://bitbucket.org/yt_analysis/yt/commits/c8133cfae467/
Changeset:   c8133cfae467
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 20:45:06
Summary:     Updating docs to reflect that cut_regions work on both particle and mesh fields.
Affected #:  1 file

diff -r 3b00b630e4aec359feb01ae8583ec53e4d7dd232 -r c8133cfae46726c378e3075cd802cb50f4fb7521 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -178,13 +178,14 @@
     | Usage: ``boolean()``
     See :ref:`boolean-data-objects`.
 
-**Mesh Field Filter** 
+**Filter** 
     | Class :class:`~yt.data_objects.data_containers.YTCutRegionBase`
     | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
     | A ``cut_region`` is a filter which can be applied to any other data 
-      object.  The filter is defined by the conditionals present, which only
-      work on mesh fields to apply cuts to the data in the object.  For more
-      detailed information and examples, see :ref:`cut-regions`.
+      object.  The filter is defined by the conditionals present, which 
+      apply cuts to the data in the object.  A ``cut_region`` will work
+      for either particle fields or mesh fields, but not both simultaneously.
+      For more detailed information and examples, see :ref:`cut-regions`.
 
 **Collection of Data Objects** 
     | Class :class:`~yt.data_objects.data_containers.YTDataCollectionBase`
@@ -206,7 +207,8 @@
     | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
     | A 3D region with all data extracted and interpolated to a single, 
       specified resolution.  Identical to covering_grid, except that it 
-      interpolates.  See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
+      interpolates as necessary from coarse regions to fine.  See 
+      :ref:`examining-grid-data-in-a-fixed-resolution-array`.
 
 **Fixed-Resolution Region for Particle Deposition** 
     | Class :class:`~yt.data_objects.data_containers.YTArbitraryGridBase`
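
For concreteness, a minimal ``cut_region`` sketch (the conditional strings
index a placeholder ``obj`` standing in for the base object; dataset name is
illustrative):

    import yt

    ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
    ad = ds.all_data()

    # keep only cells that are simultaneously dense and cold
    dense_cold = ad.cut_region(["obj['density'] > 1e-28",
                                "obj['temperature'] < 1e5"])
    print dense_cold['temperature'].max()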


https://bitbucket.org/yt_analysis/yt/commits/a443361f7d8e/
Changeset:   a443361f7d8e
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 20:52:12
Summary:     Updating creating derived fields doc to match Matt's suggestions.
Affected #:  2 files

diff -r c8133cfae46726c378e3075cd802cb50f4fb7521 -r a443361f7d8e364d69046986529ca7c006c2f096 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -67,5 +67,3 @@
 thermal_energy_density, and then generate a projection from it.
 
 .. yt_cookbook:: derived_field.py
-
-.. _cookbook-complex-derived-fields:

diff -r c8133cfae46726c378e3075cd802cb50f4fb7521 -r a443361f7d8e364d69046986529ca7c006c2f096 doc/source/developing/creating_derived_fields.rst
--- a/doc/source/developing/creating_derived_fields.rst
+++ b/doc/source/developing/creating_derived_fields.rst
@@ -25,11 +25,10 @@
        return (data.ds.gamma - 1.0) * \
               data["density"] * data["thermal_energy"]
 
-Note that we do a couple different things here.  We access the "gamma"
-parameter from the dataset, we access the "density" field and we access
-the "thermal_energy" field.  "thermal_energy" is, in fact, another derived field!
-("thermal_energy" deals with the distinction in storage of energy between dual
-energy formalism and non-DEF.)  We don't do any loops, we don't do any
+Note that we do a couple different things here.  We access the ``gamma``
+parameter from the dataset, we access the ``density`` field and we access
+the ``thermal_energy`` field.  ``thermal_energy`` is, in fact, another derived 
+field!  We don't do any loops, we don't do any
 type-checking, we can simply multiply the three items together.
 
 Once we've defined our function, we need to notify ``yt`` that the field is
@@ -138,8 +137,8 @@
 In this case, we already know what the *center* of the sphere is, so we do not set it. Also,
 note that *center* and *bulk_velocity* need to be :class:`YTArray` objects with units.
 
-Other examples for creating derived fields can be found in the cookbook recipes
-:ref:`cookbook-simple-derived-fields` and :ref:`cookbook-complex-derived-fields`.
+Other examples for creating derived fields can be found in the cookbook recipe
+:ref:`cookbook-simple-derived-fields`.
 
 .. _derived-field-options:
 
@@ -150,30 +149,30 @@
 There are a number of options available, but the only mandatory ones are ``name``,
 ``units``, and ``function``.
 
-   ``name``
+``name``
      This is the name of the field -- how you refer to it.  For instance,
      ``pressure`` or ``magnetic_field_strength``.
-   ``function``
+``function``
      This is a function handle that defines the field
-   ``units``
+``units``
      This is a string that describes the units. Powers must be in
      Python syntax (``**`` instead of ``^``).
-   ``display_name``
+``display_name``
      This is a name used in the plots, for instance ``"Divergence of
      Velocity"``.  If not supplied, the ``name`` value is used.
-   ``take_log``
+``take_log``
      This is *True* or *False* and describes whether the field should be logged
      when plotted.
-   ``particle_type``
+``particle_type``
      Is this field a *particle* field?
-   ``validators``
+``validators``
      (*Advanced*) This is a list of :class:`FieldValidator` objects, for instance to mandate
      spatial data.
-   ``display_field``
+``display_field``
      (*Advanced*) Should this field appear in the dropdown box in Reason?
-   ``not_in_all``
+``not_in_all``
      (*Advanced*) If this is *True*, the field may not be in all the grids.
-   ``output_units``
+``output_units``
      (*Advanced*) For fields that exist on disk, which we may want to convert to other
      fields or that get aliased to themselves, we can specify a different
      desired output unit than the unit found on disk.
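
For concreteness, a sketch exercising several of these options at once (the
field itself is a made-up example):

    import yt
    import numpy as np

    def _my_sound_speed(field, data):
        # gamma * pressure / density has units of velocity squared
        return np.sqrt(data.ds.gamma * data['pressure'] / data['density'])

    ds = yt.load('my_data')
    ds.add_field(('gas', 'my_sound_speed'),
                 function=_my_sound_speed,
                 units='cm/s',
                 display_name='Sound Speed',
                 take_log=True)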


https://bitbucket.org/yt_analysis/yt/commits/fe6d8d32e7e4/
Changeset:   fe6d8d32e7e4
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 20:55:30
Summary:     Merging.
Affected #:  11 files

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -13,8 +13,13 @@
 drudd = drudd at uchicago.edu
 awetzel = andrew.wetzel at yale.edu
 David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins4096 = dcollins4096 at gmail.com
 dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
 jcforbes at ucsc.edu = jforbes at ucolick.org
+ngoldbau at ucsc.edu = goldbaum at ucolick.org
+biondo at wisc.edu = Biondo at wisc.edu
+samgeen at googlemail.com = samgeen at gmail.com
+fbogert = fbogert at ucsc.edu
\ No newline at end of file

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -4,7 +4,9 @@
                 Tom Abel (tabel at stanford.edu)
                 Gabriel Altay (gabriel.altay at gmail.com)
                 Kenza Arraki (karraki at gmail.com)
+                Elliott Biondo (biondo at wisc.edu)
                 Alex Bogert (fbogert at ucsc.edu)
+                Pengfei Chen (madcpf at gmail.com)
                 David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/cookbook/calculating_information.rst
--- a/doc/source/cookbook/calculating_information.rst
+++ b/doc/source/cookbook/calculating_information.rst
@@ -67,3 +67,11 @@
 thermal_energy_density, and then generate a projection from it.
 
 .. yt_cookbook:: derived_field.py
+
+Using Particle Filters to Calculate Star Formation Rates
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to use a particle filter to calculate the star
+formation rate in a galaxy evolution simulation.
+
+.. yt_cookbook:: particle_filter_sfr.py

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/cookbook/complex_plots.rst
--- a/doc/source/cookbook/complex_plots.rst
+++ b/doc/source/cookbook/complex_plots.rst
@@ -145,6 +145,29 @@
 
 .. _cookbook-camera_movement:
 
+Customized Profile Plot
+~~~~~~~~~~~~~~~~~~~~~~~
+
+This recipe demonstrates how to create a fully customized 1D profile object
+using the :meth:`~yt.data_objects.profiles.create_profile` function and then
+create a :class:`~yt.visualization.profile_plotter.ProfilePlot` using the
+customized profile.  This illustrates how a ``ProfilePlot`` created this way
+inherits the properties of the profile it is constructed from.
+
+.. yt_cookbook:: customized_profile_plot.py
+
+Customized Phase Plot
+~~~~~~~~~~~~~~~~~~~~~
+
+Similar to the recipe above, this demonstrates how to create a fully customized
+2D profile object using the :meth:`~yt.data_objects.profiles.create_profile`
+function and then create a :class:`~yt.visualization.profile_plotter.PhasePlot`
+using the customized profile object.  This illustrates how a ``PhasePlot``
+created this way inherits the properties of the profile object it is constructed
+from.
+
+.. yt_cookbook:: customized_phase_plot.py
+
 Moving a Volume Rendering Camera
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/cookbook/customized_phase_plot.py
--- /dev/null
+++ b/doc/source/cookbook/customized_phase_plot.py
@@ -0,0 +1,31 @@
+import yt
+import yt.units as u
+
+ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
+
+center = [0.53, 0.53, 0.53]
+normal = [0,0,1]
+radius = 40*u.kpc
+height = 2*u.kpc
+
+disk = ds.disk(center, [0,0,1], radius, height)
+
+profile = yt.create_profile(
+    data_source=disk,
+    bin_fields=["radius", "cylindrical_tangential_velocity"],
+    fields=["cell_mass"],
+    n_bins=256,
+    units=dict(radius="kpc",
+               cylindrical_tangential_velocity="km/s",
+               cell_mass="Msun"),
+    logs=dict(radius=False,
+              cylindrical_tangential_velocity=False),
+    weight_field=None,
+    extrema=dict(radius=(0,40),
+                 cylindrical_tangential_velocity=(-250, 250)),
+    )
+
+plot = yt.PhasePlot.from_profile(profile)
+plot.set_cmap("cell_mass", "YlOrRd")
+
+plot.save()

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/cookbook/customized_profile_plot.py
--- /dev/null
+++ b/doc/source/cookbook/customized_profile_plot.py
@@ -0,0 +1,30 @@
+import yt
+import yt.units as u
+
+ds = yt.load('HiresIsolatedGalaxy/DD0044/DD0044')
+
+center = [0.53, 0.53, 0.53]
+normal = [0,0,1]
+radius = 40*u.kpc
+height = 5*u.kpc
+
+disk = ds.disk(center, [0,0,1], radius, height)
+
+profile = yt.create_profile(
+    data_source=disk,
+    bin_fields=["radius"],
+    fields=["cylindrical_tangential_velocity_absolute"],
+    n_bins=256,
+    units=dict(radius="kpc",
+               cylindrical_tangential_velocity_absolute="km/s"),
+    logs=dict(radius=False),
+    weight_field='cell_mass',
+    extrema=dict(radius=(0,40)),
+    )
+
+plot = yt.ProfilePlot.from_profiles(profile)
+
+plot.set_log('cylindrical_tangential_velocity_absolute', False)
+plot.set_ylim('cylindrical_tangential_velocity_absolute', 60, 160)
+
+plot.save()

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/cookbook/particle_filter_sfr.py
--- /dev/null
+++ b/doc/source/cookbook/particle_filter_sfr.py
@@ -0,0 +1,34 @@
+import yt
+import numpy as np
+from yt.data_objects.particle_filters import add_particle_filter
+from matplotlib import pyplot as plt
+
+def formed_star(pfilter, data):
+    filter = data["all", "creation_time"] > 0
+    return filter
+
+add_particle_filter("formed_star", function=formed_star, filtered_type='all',
+                    requires=["creation_time"])
+
+filename = "IsolatedGalaxy/galaxy0030/galaxy0030"
+
+ds = yt.load(filename)
+ds.add_particle_filter('formed_star')
+ad = ds.all_data()
+masses = ad['formed_star', 'particle_mass'].in_units('Msun')
+formation_time = ad['formed_star', 'creation_time'].in_units('yr')
+
+time_range = [0, 5e8] # years
+n_bins = 1000
+hist, bins = np.histogram(formation_time, bins=n_bins, range=time_range,)
+inds = np.digitize(formation_time, bins=bins)
+time = (bins[:-1] + bins[1:])/2
+
+sfr = np.array([masses[inds == j].sum()/(bins[j+1]-bins[j])
+                for j in range(len(time))])
+sfr[sfr == 0] = np.nan
+
+plt.plot(time/1e6, sfr)
+plt.xlabel('Time  [Myr]')
+plt.ylabel('SFR  [M$_\odot$ yr$^{-1}$]')
+plt.savefig("filter_sfr.png")

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -444,3 +444,41 @@
    p = yt.SlicePlot(ds, 'z', 'density', center='c', width=(20, 'kpc'))
    p.annotate_timestamp(-9, -9)
    p.save()
+
+Annotate Triangle Facets Callback
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. function:: annotate_triangle_facets(triangle_vertices, plot_args=None)
+
+   (This is a proxy for
+   :class:`~yt.visualization.plot_modifications.TriangleFacetsCallback`.)
+
+   This adds a line collection of a SlicePlot's plane-intersection
+   with the triangles to the plot. This callback is ideal for a
+   dataset representing a geometric model of triangular facets.
+
+.. python-script::
+
+   import h5py
+   import os
+   import yt 
+   
+   # Load data file
+   pf = yt.load("MoabTest/fng_usrbin22.h5m")
+
+   # Create the desired slice plot	
+   s = yt.SlicePlot(pf, 'z', ('moab','TALLY_TAG'))
+
+   # Get triangle vertices from a file (in this case HDF5)
+
+   # Set up the file path using the yt test data directory
+   filename = os.path.join(yt.config.ytcfg.get("yt", "test_data_dir"),
+                           "MoabTest/mcnp_n_impr_fluka.h5m")
+   f = h5py.File(filename, "r")
+   coords = f["/tstt/nodes/coordinates"][:]
+   conn = f["/tstt/elements/Tri3/connectivity"][:]
+   points = coords[conn-1]
+
+   # Annotate slice-triangle intersection contours to the plot
+   s.annotate_triangle_facets(points, plot_args={"colors": 'black'})
+   s.save()
\ No newline at end of file

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -1248,7 +1248,7 @@
         List of the binning fields for profiling.
     fields : list of strings
         The fields to be profiled.
-    n : int or list of ints
+    n_bins : int or list of ints
         The number of bins in each dimension.  If None, 64 bins for
         each bin are used for each bin field.
         Default: 64.
@@ -1263,7 +1263,7 @@
         attribute of the field.
     units : dict of strings
         The units of the fields in the profiles, including the bin_fields.
-    weight_field : str
+    weight_field : str or tuple field identifier
         The weight field for computing weighted average for the profile
         values.  If None, the profile values are sums of the data in
         each bin.

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -543,7 +543,7 @@
               min_val, mx, my, mz)
         return min_val, self.arr([mx, my, mz], 'code_length', dtype="float64")
 
-    def find_field_values_at_point(self, fields, coord):
+    def find_field_values_at_point(self, fields, coords):
         """
         Returns the values [field1, field2,...] of the fields at the given
         coordinates. Returns a list of field values in the same order as 
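
For context, a usage sketch of this method (the coordinates are illustrative,
given in code units):

    vals = ds.find_field_values_at_point(['density', 'temperature'],
                                         [0.5, 0.5, 0.5])
    print vals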

diff -r a443361f7d8e364d69046986529ca7c006c2f096 -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -1173,7 +1173,19 @@
         xax = plot.data.ds.coordinates.x_axis[ax]
         yax = plot.data.ds.coordinates.y_axis[ax]
 
-        l_cy = triangle_plane_intersect(plot.data.axis, plot.data.coord, self.vertices)[:,:,(xax, yax)]
+        if not hasattr(self.vertices, "in_units"):
+            vertices = plot.data.pf.arr(self.vertices, "code_length")
+        else:
+            vertices = self.vertices
+        l_cy = triangle_plane_intersect(plot.data.axis, plot.data.coord, vertices)[:,:,(xax, yax)]
+        # reformat for conversion to plot coordinates
+        l_cy = np.rollaxis(l_cy,0,3)
+        # convert all line starting points
+        l_cy[0] = self.convert_to_plot(plot,l_cy[0])
+        l_cy[1] = self.convert_to_plot(plot,l_cy[1])
+        # convert all line ending points
+        l_cy = np.rollaxis(l_cy,2,0)
+        # create line collection and add it to the plot
         lc = matplotlib.collections.LineCollection(l_cy, **self.plot_args)
         plot._axes.add_collection(lc)
         plot._axes.hold(False)
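
The `hasattr` guard added above reflects a general pattern: raw NumPy input
can be promoted to a unitful array before geometric operations. A sketch of
the same coercion from user code (array shape and values are illustrative):

    import numpy as np

    vertices = np.random.random((5, 3, 3))  # 5 triangles, 3 vertices each
    if not hasattr(vertices, 'in_units'):
        vertices = ds.arr(vertices, 'code_length')  # assumes a loaded dataset ds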


https://bitbucket.org/yt_analysis/yt/commits/cf5ef5290ce5/
Changeset:   cf5ef5290ce5
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 21:22:08
Summary:     Updating filter docs to reflect that cut regions can operate on both particle
fields and mesh fields.
Affected #:  2 files

diff -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 -r cf5ef5290ce59f365dc10eb15e881d1259f5257b doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -86,6 +86,9 @@
 
 .. notebook:: mesh_filter.ipynb
 
+Cut regions can also operate on particle fields, but a single cut region object
+cannot operate on both particle fields and mesh fields at the same time.
+
 .. _filtering-particles:
 
 Filtering Particle Fields
@@ -146,7 +149,8 @@
 spheres, regions (3D prisms), ellipsoids, disks, and rays.  The `all_data`
 object which gets used throughout this documentation section is an example of 
 a geometric object, but it defaults to including all the data in the dataset
-volume.
+volume.  To see all of the geometric objects available, see 
+:ref:`available-objects`.
 
 Consult the object documentation section for all of the different objects
 one can use, but here is a simple example using a sphere object to filter
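
For reference, a minimal sketch of such a sphere filter (the radius is
illustrative; 'c' centers the sphere on the domain):

    sp = ds.sphere('c', (10, 'kpc'))
    print sp['density'].min()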

diff -r fe6d8d32e7e41e7853d5dc5c16b6aa297bcba491 -r cf5ef5290ce59f365dc10eb15e881d1259f5257b doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -9,7 +9,7 @@
 Data objects (also called *Data Containers*) are used in yt as convenience 
 structures for grouping data in logical ways that make sense in the context 
 of the dataset as a whole.  Some of the data objects are geometrical groupings 
-of data (e.g. sphere, region--a 3D box, cylinder, etc.).  Others represent 
+of data (e.g. sphere, box, cylinder, etc.).  Others represent 
 data products derived from your dataset (e.g. slices, streamlines, surfaces).
 Still other data objects group multiple objects together or filter them
 (e.g. data collection, cut region).  


https://bitbucket.org/yt_analysis/yt/commits/abfffe58312b/
Changeset:   abfffe58312b
Branch:      yt-3.0
User:        chummels
Date:        2014-07-29 21:30:05
Summary:     Updating faq.
Affected #:  1 file

diff -r cf5ef5290ce59f365dc10eb15e881d1259f5257b -r abfffe58312b1fd9bc3872eeb58eb2e90b86d834 doc/source/reference/faq/index.rst
--- a/doc/source/reference/faq/index.rst
+++ b/doc/source/reference/faq/index.rst
@@ -56,7 +56,13 @@
 
 Many different sample datasets can be found at http://yt-project.org/data/ .
 These can be downloaded, unarchived, and they will each create their own
-directory.  If you set the option ``test_data_dir``, in the section ``[yt]``,
+directory.  It is generally straightforward to load these datasets, but if
+you have any questions about loading data from a code with which you are 
+unfamiliar, please visit :ref:`loading-data`.
+
+To make these sample datasets easier to load, you can add the parent
+directory of your downloaded sample data to your *yt path*.
+If you set the option ``test_data_dir``, in the section ``[yt]``,
 in ``~/.yt/config``, ``yt`` will search this path for them.
 
 This means you can download these datasets to ``/big_drive/data_for_yt`` , add
@@ -83,7 +89,7 @@
 
 ``yt`` sets up defaults for many fields for whether or not a field is presented
 in log or linear space. To override this behavior, you can modify the
-``field_info`` dictionary.  For example, if you prefer that ``Density`` not be
+``field_info`` dictionary.  For example, if you prefer that ``density`` not be
 logged, you could type:
 
 .. code-block:: python
@@ -110,8 +116,8 @@
 .. code-block:: python
 
    ds = load("my_data")
-   dd = ds.all_data()
-   potential_field = dd["PotentialField"]
+   ad = ds.all_data()
+   potential_field = ad["PotentialField"]
 
 The same applies to fields you might derive inside your ``yt`` script
 via :ref:`creating-derived-fields`. To check what fields are
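
A minimal sketch of the ``field_info`` override described here (the field
name is illustrative, not necessarily the exact snippet from the FAQ):

    ds = yt.load('my_data')
    ds.index  # populate field_info
    ds.field_info['gas', 'density'].take_log = False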


https://bitbucket.org/yt_analysis/yt/commits/76f1cf5a7f78/
Changeset:   76f1cf5a7f78
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-07-29 22:16:31
Summary:     Merged in chummels/yt/yt-3.0 (pull request #1087)

Docs Objects Update
Affected #:  26 files

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/_dq_docstrings.inc
--- a/doc/source/analyzing/_dq_docstrings.inc
+++ /dev/null
@@ -1,94 +0,0 @@
-
-
-.. function:: angular_momentum_vector()
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.AngularMomentumVector`.)
-   This function returns the mass-weighted average angular momentum vector.
-
-
-.. function:: bulk_velocity():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.BulkVelocity`.)
-   This function returns the mass-weighted average velocity in the object.
-
-
-.. function:: center_of_mass(use_cells=True, use_particles=False):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.CenterOfMass`.)
-   This function returns the location of the center
-   of mass. By default, it computes it from the *non-particle* data in the object.
-   
-   Parameters
-   ----------
-   
-   use_cells : bool
-       If True, will include the cell mass (default: True)
-   use_particles : bool
-       if True, will include the particles in the object (default: False)
-
-
-
-.. function:: extrema(fields, non_zero=False, filter=None):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.Extrema`.)
-   This function returns the extrema of a set of fields
-   
-   :param fields: A field name, or a list of field names
-   :param filter: a string to be evaled to serve as a data filter.
-
-
-.. function:: max_location(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.max_location`.)
-   This function returns the location of the maximum of a set
-   of fields.
-
-
-.. function:: min_location(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.MinLocation`.)
-   This function returns the location of the minimum of a set
-   of fields.
-
-
-
-.. function:: spin_parameter(use_gas=True, use_particles=True):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.SpinParameter`.)
-   This function returns the spin parameter for the baryons, but it uses
-   the particles in calculating enclosed mass.
-
-
-.. function:: total_mass():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalMass`.)
-   This function takes no arguments and returns the sum of cell masses and
-   particle masses in the object.
-
-
-.. function:: total_quantity(fields):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalQuantity`.)
-   This function sums up a given field over the entire region
-   
-   :param fields: The fields to sum up
-
-
-
-.. function:: weighted_average_quantity(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`.)
-   This function returns an averaged quantity.
-   
-   :param field: The field to average
-   :param weight: The field to weight by
-
-.. function:: weighted_variance(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedVariance`.)
-    This function returns the variance of a field.
-
-    :param field: The target field
-    :param weight: The field to weight by
-
-    Returns the weighted variance and the weighted mean.
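
For reference, the quantities these proxies documented remain reachable in
yt-3.0 through the ``quantities`` attribute of any data object; a minimal
sketch (assuming a loaded dataset ``ds``):

    ad = ds.all_data()
    print ad.quantities.total_mass()
    print ad.quantities.extrema('density')
    print ad.quantities.angular_momentum_vector()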

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ /dev/null
@@ -1,150 +0,0 @@
-
-
-.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
-
-
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
-
-
-.. class:: cut_region(self, base_region, field_cuts, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.InLineExtractedRegionBase`.)
-
-
-.. class:: cutting(self, normal, center, fields=None, node_name=None, north_vector=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
-
-
-.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
-
-
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
-
-
-.. class:: extracted_region(self, base_region, indices, force_refresh=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.ExtractedRegionBase`.)
-
-
-.. class:: fixed_res_cutting(self, normal, center, width, dims, fields=None, node_name=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
-
-
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
-
-
-.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
-
-
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
-
-
-.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
-
-
-.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
-
-
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
-
-
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
-
-
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
-
-
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
-
-
-.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
-
-
-.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
-
-
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
-
-
-.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
-
-
-.. class:: smoothed_covering_grid(self, *args, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
-
-
-.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
-
-
-.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)
-
-
-.. class:: surface(self, data_source, surface_field, field_value):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSurfaceBase`.)

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/creating_derived_fields.rst
--- a/doc/source/analyzing/creating_derived_fields.rst
+++ /dev/null
@@ -1,209 +0,0 @@
-.. _creating-derived-fields:
-
-Creating Derived Fields
-=======================
-
-One of the more powerful means of extending ``yt`` is through the usage of derived
-fields.  These are fields that describe a value at each cell in a simulation.
-
-Defining a New Field
---------------------
-
-So once a new field has been conceived of, the best way to create it is to
-construct a function that performs an array operation -- operating on a 
-collection of data, neutral to its size, shape, and type. (All fields should
-be provided as 64-bit floats.)
-
-A simple example of this is the pressure field, which demonstrates the ease of
-this approach.
-
-.. code-block:: python
-
-   import yt
-
-   def _pressure(field, data):
-       return (data.ds.gamma - 1.0) * \
-              data["density"] * data["thermal_energy"]
-
-Note that we do a couple different things here.  We access the "gamma"
-parameter from the dataset, we access the "density" field and we access
-the "thermal_energy" field.  "thermal_energy" is, in fact, another derived field!
-("thermal_energy" deals with the distinction in storage of energy between dual
-energy formalism and non-DEF.)  We don't do any loops, we don't do any
-type-checking, we can simply multiply the three items together.
-
-Once we've defined our function, we need to notify ``yt`` that the field is
-available.  The :func:`add_field` function is the means of doing this; it has a
-number of fairly specific parameters that can be passed in, but here we'll only
-look at the most basic ones needed for a simple scalar baryon field.
-
-.. code-block:: python
-
-   yt.add_field("pressure", function=_pressure, units="dyne/cm**2")
-
-We feed it the name of the field, the name of the function, and the
-units.  Note that the units parameter is a "raw" string, in the format that ``yt`` uses
-in its `symbolic units implementation <units>`_ (e.g., employing only unit names, numbers,
-and mathematical operators in the string, and using ``"**"`` for exponentiation). We suggest
-that you name the function that creates a derived field with the intended field name prefixed
-by a single underscore, as in the ``_pressure`` example above.
-
-:func:`add_field` can be invoked in two other ways. The first is by the function
-decorator :func:`derived_field`. The following code is equivalent to the previous
-example:
-
-.. code-block:: python
-
-   from yt import derived_field
-
-   @derived_field(name="pressure", units="dyne/cm**2")
-   def _pressure(field, data):
-       return (data.ds.gamma - 1.0) * \
-              data["density"] * data["thermal_energy"]
-
-The :func:`derived_field` decorator takes the same arguments as :func:`add_field`,
-and is often a more convenient shorthand in cases where you want to quickly set up
-a new field.
-
-Defining derived fields in the above fashion must be done before a dataset is loaded,
-in order for the dataset to recognize it. If you want to set up a derived field after you
-have loaded a dataset, or if you only want to set up a derived field for a particular
-dataset, there is an :meth:`add_field` method that hangs off dataset objects. The calling
-syntax is the same:
-
-.. code-block:: python
-
-   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   ds.add_field("pressure", function=_pressure, units="dyne/cm**2")
-
-If you find yourself using the same custom-defined fields over and over, you
-should put them in your plugins file as described in :ref:`plugin-file`.
-
-A More Complicated Example
---------------------------
-
-But what if we want to do something a bit more fancy?  Here's an example of getting
-parameters from the data object and using those to define the field;
-specifically, here we obtain the ``center`` and ``bulk_velocity`` parameters
-and use those to define a field for radial velocity (there is already a ``"radial_velocity"``
-field in ``yt``, but we create this one here just as a transparent and simple example).
-
-.. code-block:: python
-
-   import yt
-   import numpy as np
-
-   from yt.fields.api import ValidateParameter
-
-   def _my_radial_velocity(field, data):
-       if data.has_field_parameter("bulk_velocity"):
-           bv = data.get_field_parameter("bulk_velocity").in_units("cm/s")
-       else:
-           bv = data.ds.arr(np.zeros(3), "cm/s")
-       xv = data["gas","velocity_x"] - bv[0]
-       yv = data["gas","velocity_y"] - bv[1]
-       zv = data["gas","velocity_z"] - bv[2]
-       center = data.get_field_parameter('center')
-       x_hat = data["x"] - center[0]
-       y_hat = data["y"] - center[1]
-       z_hat = data["z"] - center[2]
-       r = np.sqrt(x_hat*x_hat+y_hat*y_hat+z_hat*z_hat)
-       x_hat /= r
-       y_hat /= r
-       z_hat /= r
-       return xv*x_hat + yv*y_hat + zv*z_hat
-   yt.add_field("my_radial_velocity",
-                function=_my_radial_velocity,
-                units="cm/s",
-                take_log=False,
-                validators=[ValidateParameter('center'),
-                            ValidateParameter('bulk_velocity')])
-
-Note that we have added a few parameters below the main function; we specify
-that we do not wish to display this field as logged, and we require both
-``bulk_velocity`` and ``center`` to be present in any data object for which
-we wish to calculate this field.  The latter is done through the parameter
-*validators*, which accepts a list of :class:`FieldValidator` objects.  These
-objects define the way in which the field is generated, and when it is able to
-be created.  In this case, we mandate that the parameters *center* and
-*bulk_velocity* are set before creating the field.  These are set via
-:meth:`~yt.data_objects.data_containers.set_field_parameter`, which can 
-be called on any object that has fields:
-
-.. code-block:: python
-
-   ds = yt.load("GasSloshing/sloshing_nomag2_hdf5_plt_cnt_0100")
-   sp = ds.sphere("max", (200.,"kpc"))
-   sp.set_field_parameter("bulk_velocity", yt.YTArray([-100.,200.,300.], "km/s"))
-
-In this case, we already know what the *center* of the sphere is, so we do not set it. Also,
-note that *center* and *bulk_velocity* need to be :class:`YTArray` objects with units.
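-
-With both parameters now available, the new field can be queried off the
-sphere like any other field (a minimal sketch, reusing the sphere above):
-
-.. code-block:: python
-
-   print sp["my_radial_velocity"]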
-
-Other examples for creating derived fields can be found in the cookbook recipes
-:ref:`cookbook-simple-derived-fields` and :ref:`cookbook-complex-derived-fields`.
-
-.. _derived-field-options:
-
-Field Options
--------------
-
-The arguments to :func:`add_field` are passed on to the constructor of :class:`DerivedField`.
-There are a number of options available, but the only mandatory ones are ``name``,
-``units``, and ``function``.
-
-   ``name``
-     This is the name of the field -- how you refer to it.  For instance,
-     ``pressure`` or ``magnetic_field_strength``.
-   ``function``
-     This is a function handle that defines the field
-   ``units``
-     This is a string that describes the units. Powers must be in
-     Python syntax (``**`` instead of ``^``).
-   ``display_name``
-     This is a name used in the plots, for instance ``"Divergence of
-     Velocity"``.  If not supplied, the ``name`` value is used.
-   ``take_log``
-     This is *True* or *False* and describes whether the field should be logged
-     when plotted.
-   ``particle_type``
-     Is this field a *particle* field?
-   ``validators``
-     (*Advanced*) This is a list of :class:`FieldValidator` objects, for instance to mandate
-     spatial data.
-   ``display_field``
-     (*Advanced*) Should this field appear in the dropdown box in Reason?
-   ``not_in_all``
-     (*Advanced*) If this is *True*, the field may not be in all the grids.
-   ``output_units``
-     (*Advanced*) For fields that exist on disk, which we may want to convert to other
-     fields or that get aliased to themselves, we can specify a different
-     desired output unit than the unit found on disk.
-
-Units for Cosmological Datasets
--------------------------------
-
-``yt`` has additional capabilities to handle the comoving coordinate system used
-internally in cosmological simulations.  In simulations that use comoving
-coordinates, all length units have three other counterparts corresponding to
-comoving units, scaled comoving units, and scaled proper units.  In all cases,
-'scaled' units refer to scaling by the reduced Hubble parameter; i.e., the length
-unit is what it would be in a universe where the Hubble parameter is 100 km/s/Mpc.
-
-To access these different units, ``yt`` has a common naming system.  Scaled units
-are denoted by dividing by the scaled Hubble parameter ``h`` (which is itself a
-unit).  Comoving units are denoted by appending ``cm`` to the end of the unit name.
-
-Using the parsec as an example,
-
-``pc``
-    Proper parsecs, :math:`\rm{pc}`.
-
-``pccm``
-    Comoving parsecs, :math:`\rm{pc}/(1+z)`.
-
-``pccm/h``
-    Comoving parsecs normalized by the scaled Hubble constant, :math:`\rm{pc}/h/(1+z)`.
-
-``pc/h``
-    Proper parsecs normalized by the scaled Hubble constant, :math:`\rm{pc}/h`.
-
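-As a short sketch (assuming a loaded cosmological dataset ``ds``), any of
-these unit names can be passed wherever a unit string is accepted:
-
-.. code-block:: python
-
-   dd = ds.all_data()
-   print dd["dx"].in_units("pccm/h")
-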
-Further examples of this functionality are shown in :ref:`comoving_units_and_code_units`.

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/fields.rst
--- a/doc/source/analyzing/fields.rst
+++ b/doc/source/analyzing/fields.rst
@@ -169,6 +169,27 @@
 
 .. include reference here once it's done
 
+The full list of fields available for a dataset can be found as 
+the attribute ``field_list`` for native, on-disk fields and ``derived_field_list``
+for derived fields (``derived_field_list`` is a superset of ``field_list``).
+You can view these lists by examining a dataset like this:
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   print ds.field_list
+   print ds.derived_field_list
+
+Through the ``field_info`` attribute, one can access information about a given
+field, such as its default units or the source code that generates it.
+
+.. code-block:: python
+
+   ds = yt.load("my_data")
+   ds.index
+   print ds.field_info["gas", "pressure"].get_units()
+   print ds.field_info["gas", "pressure"].get_source()
+
 Particle Fields
 ---------------
 
@@ -190,4 +211,3 @@
 a field for the smoothing length ``h``, which is roughly equivalent to 
 ``(m/\rho)^{1/3}``, where ``m`` and ``rho`` are the particle mass and density 
 respectively.  This can be useful for doing neighbour finding.
-

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/filtering.rst
--- a/doc/source/analyzing/filtering.rst
+++ b/doc/source/analyzing/filtering.rst
@@ -75,6 +75,8 @@
     print 'Density of "overpressure and fast" data: ad["density"][overpressure_and_fast] = \n%s' % \
            ad['density'][overpressure_and_fast]
 
+.. _cut-regions:
+
 Cut Regions
 ^^^^^^^^^^^
 
@@ -84,6 +86,9 @@
 
 .. notebook:: mesh_filter.ipynb
 
+Cut regions can also operate on particle fields, but a single cut region object
+cannot operate on both particle fields and mesh fields at the same time, as the
+sketch below illustrates.
+
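+A minimal sketch (assuming the ``all_data`` object ``ad`` from the examples
+above, and a dataset carrying particle fields; the particle field tuple here
+is illustrative):
+
+.. code-block:: python
+
+   # mesh-field cut region
+   hot = ad.cut_region(['obj["temperature"] > 1e6'])
+   # particle-field cut region (must be built as a separate object)
+   heavy = ad.cut_region(['obj[("all", "particle_mass")].in_units("Msun") > 1e6'])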
 .. _filtering-particles:
 
 Filtering Particle Fields
@@ -144,7 +149,8 @@
 spheres, regions (3D prisms), ellipsoids, disks, and rays.  The `all_data`
 object which gets used throughout this documentation section is an example of 
 a geometric object, but it defaults to including all the data in the dataset
-volume.
+volume.  To see all of the geometric objects available, see 
+:ref:`available-objects`.
 
 Consult the object documentation section for all of the different objects
 one can use, but here is a simple example using a sphere object to filter

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/index.rst
--- a/doc/source/analyzing/index.rst
+++ b/doc/source/analyzing/index.rst
@@ -6,13 +6,12 @@
 .. toctree::
    :maxdepth: 2
 
+   fields
    objects
    units/index
-   fields
-   creating_derived_fields
    filtering
    generating_processed_data
    time_series_analysis
+   parallel_computation
    external_analysis
-   parallel_computation
    analysis_modules/index

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -1,136 +1,340 @@
-.. _using-objects:
+.. _data-objects:
 
-Using and Manipulating Objects and Fields
-=========================================
+Data Objects
+============
+
+What are Data Objects in yt?
+----------------------------
+
+Data objects (also called *Data Containers*) are used in yt as convenience 
+structures for grouping data in logical ways that make sense in the context 
+of the dataset as a whole.  Some of the data objects are geometrical groupings 
+of data (e.g. sphere, box, cylinder, etc.).  Others represent 
+data products derived from your dataset (e.g. slices, streamlines, surfaces).
+Still other data objects group multiple objects together or filter them
+(e.g. data collection, cut region).  
 
 To generate standard plots, objects rarely need to be directly constructed.
 However, for detailed data inspection as well as hand-crafted derived data,
 objects can be exceptionally useful and even necessary.
 
-.. _types_of_fields:
-
-What Types of Fields are There?
+How to Create and Use an Object
 -------------------------------
 
-``yt`` makes a distinction between two types of fields.
-
- * Fields it might expect to find on disk
- * Fields it has to generate in memory
-
-With the 2.3 release of ``yt``, the distinction between these has become more
-clear.  This enables much better specification of which fields are expected to
-exist, and to provide fallbacks for calculating them.  For instance you can now
-say, "temperature" might exist, but if it doesn't, here's how you calculate it.
-This also provides easier means of translating fields between different
-frontends.  For instance, FLASH may refer to the temperature field as "temp"
-while Enzo calls it "temperature".  Translator functions ensure that any
-derived field relying on "temp" or "temperature" works with both output types.
-
-When a field is requested, the dataset object first looks to see if that field
-exists on disk.  If it does not, it then queries the list of code-specific
-derived fields.  If it finds nothing there, it then defaults to examining the
-global set of derived fields.
-
-To add a derived field, which is not expected to necessarily exist on disk, use
-the standard construction:
+To create an object, you usually only need a loaded dataset, the name of 
+the object type, and the relevant parameters for your object.  Here is a common
+example for creating a ``Region`` object that covers all of your data volume.
 
 .. code-block:: python
 
-   add_field("specific_thermal_energy", function=_specific_thermal_energy,
-             units="ergs/g")
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   ad = ds.all_data()
 
-where ``_specific_thermal_energy`` is a python function that defines the field.
-
-.. _accessing-fields:
-
-Accessing Fields in Objects
----------------------------
-
-``yt`` utilizes load-on-demand objects to represent physical regions in space.
-(see :ref:`how-yt-thinks-about-data`.)  Data objects in ``yt`` all respect the following
-protocol for accessing data:
+Alternatively, we could create a sphere object of radius 1 kpc on location 
+[0.5, 0.5, 0.5]:
 
 .. code-block:: python
 
-   my_object["density"]
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
 
-where ``"density"`` can be any field name and ``"my_object"`` any one of
-the possible data containers listed at :ref:`available-objects`. For
-example, if we wanted to look at the temperature of cells within a
-spherical region of radius 10 kpc, centered at [0.5, 0.5, 0.5] in our
-simulation box, we would create a sphere object with:
+After an object has been created, it can be used as a ``data_source`` for
+certain tasks like ``ProjectionPlot`` (see
+:class:`~yt.visualization.plot_window.ProjectionPlot`), it can be used to
+compute the bulk quantities associated with that object (see
+:ref:`derived-quantities`), or its data can be examined directly.  For
+example, if you want to find the temperature at all indexed locations in the
+central sphere of your dataset, you could:
 
 .. code-block:: python
 
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   sp = ds.sphere([0.5, 0.5, 0.5], (1, 'kpc'))
 
-and then look at the temperature of its cells within it via:
-
-.. code-block:: python
-
+   # Show all temperature values
    print sp["temperature"]
 
-Information about how to create a new type of object can be found in
-:ref:`creating-objects`. The field is returned as a single, flattened
-array without spatial information.  The best mechanism for
-manipulating spatial data is the :class:`~yt.data_objects.data_containers.AMRCoveringGridBase` object.
-
-The full list of fields that are available can be found as a property of the
-Hierarchy or Static Output object that you wish to access.  This property is
-calculated every time the object is instantiated.  The full list of fields that
-have been identified in the output file, which need no processing (besides unit
-conversion) are in the property ``field_list`` and the full list of
-potentially-accessible derived fields is available in the property
-``derived_field_list``.  You can see these by examining the two properties:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_list
-   print ds.derived_field_list
-
-When a field is added, it is added to a container that hangs off of the
-dataset, as well.  All of the field creation options
-(:ref:`derived-field-options`) are accessible through this object:
-
-.. code-block:: python
-
-   ds = yt.load("my_data")
-   print ds.field_info["pressure"].get_units()
-
-This is a fast way to examine the units of a given field, and additionally you
-can use :meth:`yt.utilities.pydot.get_source` to get the source code:
-
-.. code-block:: python
-
-   field = ds.field_info["pressure"]
-   print field.get_source()
+   # Print things in a more human-friendly manner: one temperature at a time
+   print "(x,  y,  z) Temperature"
+   print "-----------------------"
+   for i in range(sp["temperature"].size):
+       print "(%f,  %f,  %f)    %f" % (sp["x"][i], sp["y"][i], sp["z"][i], sp["temperature"][i])
 
 .. _available-objects:
 
 Available Objects
 -----------------
 
-Objects are instantiated by direct access of an index.  Each of the objects
-that can be generated by an index is in fact a fully-fledged data object
-respecting the standard protocol for interaction.
+As noted above, there are numerous types of objects.  Here we group them
+into:
 
-The following objects are available, all of which hang off of the index
-object.  To access them, you would do something like this (as for a
-:class:`region`):
+* *Geometric Objects* - Data is selected based on spatial shapes in the dataset
+* *Filtering Objects* - Data is selected based on other field criteria
+* *Collection Objects* - Multiple objects grouped together
+* *Construction Objects* - Objects representing some sort of data product
+  constructed by additional analysis
+
+If you want to create your own custom data object type, see 
+:ref:`creating-objects`.
+
+Geometric Objects
+^^^^^^^^^^^^^^^^^
+
+For 0D, 1D, and 2D geometric objects, if the extent of the object
+intersects a grid cell, then the cell is included in the object; however,
+for 3D objects the *center* of the cell must be within the object in order
+for the grid cell to be incorporated.
+
+0D Objects
+""""""""""
+
+**Point** 
+    | Class :class:`~yt.data_objects.data_containers.YTPointBase`    
+    | Usage: ``point(coord, ds=None, field_parameters=None)``
+    | A point defined by a single cell at specified coordinates.
+
+1D Objects
+""""""""""
+
+**Ray (Axis-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTOrthoRayBase`
+    | Usage: ``ortho_ray(axis, coord, ds=None, field_parameters=None)``
+    | A line (of data cells) stretching through the full domain 
+      aligned with one of the x,y,z axes.  Defined by an axis and a point
+      to be intersected.
+
+**Ray (Arbitrarily-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTRayBase`
+    | Usage: ``ray(start_coord, end_coord, ds=None, field_parameters=None)``
+    | A line (of data cells) defined by arbitrary start and end coordinates. 
+
+2D Objects
+""""""""""
+
+**Slice (Axis-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTSliceBase`
+    | Usage: ``slice(axis, coord, center=None, ds=None, field_parameters=None)``
+    | A plane normal to one of the axes and intersecting a particular 
+      coordinate.
+
+**Slice (Arbitrarily-Aligned)** 
+    | Class :class:`~yt.data_objects.data_containers.YTCuttingPlaneBase`
+    | Usage: ``cutting(normal, coord, north_vector=None, ds=None, field_parameters=None)``
+    | A plane normal to a specified vector and intersecting a particular 
+      coordinate.
+
+3D Objects
+""""""""""
+
+**All Data** 
+    | Function :func:`~yt.data_objects.static_output.Dataset.all_data`
+    | Usage: ``all_data(find_max=False)``
+    | ``all_data()`` is a wrapper on the Box Region class which defaults to 
+      creating a Region covering the entire dataset domain.  It is effectively 
+      ``ds.region(ds.domain_center, ds.domain_left_edge, ds.domain_right_edge)``.
+
+**Box Region** 
+    | Class :class:`~yt.data_objects.data_containers.YTRegionBase`
+    | Usage: ``region(center, left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
+    | Alternatively: ``box(left_edge, right_edge, fields=None, ds=None, field_parameters=None)``
+    | A box-like region aligned with the grid axis orientation.  It is 
+      defined by a left_edge, a right_edge, and a center.  The left_edge
+      and right_edge are the minimum and maximum bounds in the three axes
+      respectively.  The center is arbitrary and must only be contained within
+      the left_edge and right_edge.  By using the ``box`` wrapper, the center
+      is assumed to be the midpoint between the left and right edges.
+
+**Disk/Cylinder** 
+    | Class: :class:`~yt.data_objects.data_containers.YTDiskBase`
+    | Usage: ``disk(center, normal, radius, height, fields=None, ds=None, field_parameters=None)``
+    | A cylinder defined by a point at the center of one of the circular bases,
+      a normal vector to it defining the orientation of the length of the
+      cylinder, and radius and height values for the cylinder's dimensions.
+
+**Ellipsoid** 
+    | Class :class:`~yt.data_objects.data_containers.YTEllipsoidBase`
+    | Usage: ``ellipsoid(center, semi_major_axis_length, semi_medium_axis_length, semi_minor_axis_length, semi_major_vector, tilt, fields=None, ds=None, field_parameters=None)``
+    | An ellipsoid with axis magnitudes set by semi_major_axis_length,
+      semi_medium_axis_length, and semi_minor_axis_length.  semi_major_vector
+      sets the direction of the semi_major_axis.  tilt defines the orientation
+      of the semi_medium and semi_minor axes.
+
+**Sphere** 
+    | Class :class:`~yt.data_objects.data_containers.YTSphereBase`
+    | Usage: ``sphere(center, radius, ds=None, field_parameters=None)``
+    | A sphere defined by a central coordinate and a radius.
+
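+Following the usage signatures above, a few of these geometric objects might
+be constructed like this (a minimal sketch assuming an arbitrary loaded
+dataset):
+
+.. code-block:: python
+
+   import yt
+   ds = yt.load("RedshiftOutput0005")
+   # an x-axis-aligned ray through the point (y, z) = (0.4, 0.6)
+   ray = ds.ortho_ray(0, (0.4, 0.6))
+   # a disk of radius 10 kpc and height 3 kpc around the domain center
+   dk = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.], (10, 'kpc'), (3, 'kpc'))
+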
+
+Filtering and Collection Objects
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+See also the section on :ref:`filtering-data`.
+
+**Boolean Regions** 
+    | **Note: not yet implemented in yt 3.0**
+    | Usage: ``boolean()``
+    | See :ref:`boolean-data-objects`.
+
+**Filter** 
+    | Class :class:`~yt.data_objects.data_containers.YTCutRegionBase`
+    | Usage: ``cut_region(base_object, conditionals, ds=None, field_parameters=None)``
+    | A ``cut_region`` is a filter which can be applied to any other data 
+      object.  The filter is defined by the conditionals present, which 
+      apply cuts to the data in the object.  A ``cut_region`` will work
+      for either particle fields or mesh fields, but not on both simultaneously.
+      For more detailed information and examples, see :ref:`cut-regions`.
+
+**Collection of Data Objects** 
+    | Class :class:`~yt.data_objects.data_containers.YTDataCollectionBase`
+    | Usage: ``data_collection(center, obj_list, ds=None, field_parameters=None)``
+    | A ``data_collection`` is a list of data objects that can be 
+      sampled and processed as a whole in a single data object.
+
+Construction Objects
+^^^^^^^^^^^^^^^^^^^^
+
+**Fixed-Resolution Region** 
+    | Class :class:`~yt.data_objects.data_containers.YTCoveringGridBase`
+    | Usage: ``covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
+    | A 3D region with all data extracted to a single, specified resolution.
+      See :ref:`examining-grid-data-in-a-fixed-resolution-array`.
+
+**Fixed-Resolution Region with Smoothing** 
+    | Class :class:`~yt.data_objects.data_containers.YTSmoothedCoveringGridBase`
+    | Usage: ``smoothed_covering_grid(level, left_edge, dimensions, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, field_parameters=None)``
+    | A 3D region with all data extracted and interpolated to a single, 
+      specified resolution.  Identical to covering_grid, except that it 
+      interpolates as necessary from coarse regions to fine.  See 
+      :ref:`examining-grid-data-in-a-fixed-resolution-array`.
+
+**Fixed-Resolution Region for Particle Deposition** 
+    | Class :class:`~yt.data_objects.data_containers.YTArbitraryGridBase`
+    | Usage: ``arbitrary_grid(left_edge, right_edge, dimensions, ds=None, field_parameters=None)``
+    | When particles are deposited on to mesh fields, they use the existing
+      mesh structure, but this may have too much or too little resolution
+      relative to the particle locations (or it may not exist at all!).  An
+      ``arbitrary_grid`` provides a means for generating a new independent mesh
+      structure for particle deposition.  See :ref:`arbitrary-grid` for more 
+      information.
+
+**Projection** 
+    | Class :class:`~yt.data_objects.data_containers.YTQuadTreeProjBase`
+    | Usage: ``proj(field, axis, weight_field=None, center=None, ds=None, data_source=None, style="integrate", field_parameters=None)``
+    | A 2D projection of a 3D volume along one of the axis directions.  
+      By default, this is a line integral through the entire simulation volume 
+      (although it can be a subset of that volume specified by a data object
+      with the ``data_source`` keyword).  Alternatively, one can specify 
+      a weight_field and different ``style`` values to change the nature
+      of the projection outcome.  See :ref:`projection-types` for more information.
+
+**Streamline** 
+    | Class :class:`~yt.data_objects.data_containers.YTStreamlineBase`
+    | Usage: ``streamline(coord_list, length, fields=None, ds=None, field_parameters=None)``
+    | A ``streamline`` can be traced out by identifying a starting coordinate (or 
+      list of coordinates) and allowing it to trace a vector field, like gas
+      velocity.  See :ref:`streamlines` for more information.
+
+**Surface** 
+    | Class :class:`~yt.data_objects.data_containers.YTSurfaceBase`
+    | Usage: ``surface(data_source, field, field_value)``
+    | The surface defined by an isocontour in any mesh field.  An existing
+      data object must be provided as the source, as well as a mesh field
+      and the value of that field at which to draw the isocontour.  See
+      :ref:`extracting-isocontour-information`.
+
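+As a minimal sketch (assuming a loaded AMR dataset ``ds``), two of these
+construction objects might be created following the usage signatures above:
+
+.. code-block:: python
+
+   # all data extracted at level 2 resolution onto a 128^3 array
+   cg = ds.covering_grid(2, ds.domain_left_edge, [128, 128, 128])
+   # an independent 64^3 mesh for particle deposition
+   ag = ds.arbitrary_grid(ds.domain_left_edge, ds.domain_right_edge,
+                          [64, 64, 64])
+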
+.. _derived-quantities:
+
+Processing Objects: Derived Quantities
+--------------------------------------
+
+Derived quantities are a way of calculating some bulk quantities associated
+with all of the grid cells contained in a data object.  
+Derived quantities can be accessed via the ``quantities`` interface.
+Here is an example of how to get the angular momentum vector calculated from 
+all the cells contained in a sphere at the center of our dataset.
 
 .. code-block:: python
 
-   import yt
-   ds = yt.load("RedshiftOutput0005")
-   reg = ds.region([0.5, 0.5, 0.5], [0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
+   ds = load("my_data")
+   sp = ds.sphere('c', (10, 'kpc'))
+   print ad.quantities.angular_momentum_vector()
 
-.. include:: _obj_docstrings.inc
+Available Derived Quantities
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+**Angular Momentum Vector**
+    | Class :class:`~yt.data_objects.derived_quantities.AngularMomentumVector`
+    | Usage: ``angular_momentum_vector(use_gas=True, use_particles=True)``
+    | The mass-weighted average angular momentum vector of the particles, gas, 
+      or both.
+
+**Bulk Velocity**
+    | Class :class:`~yt.data_objects.derived_quantities.BulkVelocity`
+    | Usage: ``bulk_velocity(use_gas=True, use_particles=True)``
+    | The mass-weighted average velocity of the particles, gas, or both.
+
+**Center of Mass**
+    | Class :class:`~yt.data_objects.derived_quantities.CenterOfMass`
+    | Usage: ``center_of_mass(use_cells=True, use_particles=False)``
+    | The location of the center of mass.  By default, it is computed from
+      the *non-particle* data in the object, but it can be calculated from
+      the particles, the gas, or both.
+
+**Extrema**
+    | Class :class:`~yt.data_objects.derived_quantities.Extrema`
+    | Usage: ``extrema(fields, non_zero=False)``
+    | The extrema of a field or list of fields.
+
+**Maximum Location**
+    | Class :class:`~yt.data_objects.derived_quantities.max_location`
+    | Usage: ``max_location(fields)``
+    | The maximum of a field or list of fields as well
+      as the x,y,z location of that maximum.
+
+**Minimum Location**
+    | Class :class:`~yt.data_objects.derived_quantities.min_location`
+    | Usage: ``min_location(fields)``
+    | The minimum of a field or list of fields as well
+      as the x,y,z location of that minimum.
+
+**Spin Parameter**
+    | Class :class:`~yt.data_objects.derived_quantities.SpinParameter`
+    | Usage: ``spin_parameter(use_gas=True, use_particles=True)``
+    | The spin parameter for the baryons using the particles, gas, or both.
+
+**Total Mass**
+    | Class :class:`~yt.data_objects.derived_quantities.TotalMass`
+    | Usage: ``total_mass()``
+    | The total mass of the object as a tuple of (total gas, total particle)
+      mass.
+
+**Total of a Field**
+    | Class :class:`~yt.data_objects.derived_quantities.TotalQuantity`
+    | Usage: ``total_quantity(fields)``
+    | The sum of a given field (or list of fields) over the entire object.
+
+**Weighted Average of a Field**
+    | Class :class:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`
+    | Usage: ``weighted_average_quantity(fields, weight)``
+    | The weighted average of a field (or list of fields)
+      over an entire data object.  If you want an unweighted average, 
+      then set your weight to be the field: ``ones``.
+
+**Weighted Variance of a Field**
+    | Class :class:`~yt.data_objects.derived_quantities.WeightedVariance`
+    | Usage: ``weighted_variance(fields, weight)``
+    | The weighted variance of a field (or list of fields)
+      over an entire data object and the weighted mean.  
+      If you want an unweighted variance, then 
+      set your weight to be the field: ``ones``.
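+
+As a minimal sketch (reusing the sphere from the example above), several of
+these can be called directly off the ``quantities`` interface:
+
+.. code-block:: python
+
+   # minimum and maximum gas density in the sphere
+   mi, ma = sp.quantities.extrema("density")
+   # density-weighted average temperature
+   t_avg = sp.quantities.weighted_average_quantity("temperature", "density")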
 
 .. _arbitrary-grid:
 
-Arbitrary Grids
----------------
+Arbitrary Grid Objects for Particle Deposition
+-----------------------------------------------
 
 The covering grid and smoothed covering grid objects mandate that they be
 exactly aligned with the mesh.  This is a
@@ -195,102 +399,10 @@
 Please see the :ref:`cookbook` for some examples of how to use the boolean
 data object.
 
-.. _derived-quantities:
-
-Processing Objects: Derived Quantities
---------------------------------------
-
-Derived quantities are a way of operating on a collection of cells and
-returning a set of values that is fewer in number than the number of cells --
-yt already knows about several.  Every 3D data object (see
-:ref:`using-objects`) provides a mechanism for access to derived quantities.
-These can be accessed via the ``quantities`` interface, like so:
-
-.. code-block:: python
-
-   ds = load("my_data")
-   dd = ds.all_data()
-   dd.quantities.angular_momentum_vector()
-
-The following quantities are available via the ``quantities`` interface.
-
-.. include:: _dq_docstrings.inc
-
-Creating Derived Quantities
-+++++++++++++++++++++++++++
-
-The basic idea is that you need to be able to operate both on a set of data,
-and a set of sets of data.  (If this is not possible, the quantity needs to be
-added with the ``force_unlazy`` option.)
-
-Two functions are necessary.  One will operate on arrays of data, either fed
-from each grid individually or fed from the entire data object at once.  The
-second one takes the results of the first, either as lists of arrays or as
-single arrays, and returns the final values.  For an example, we look at the
-``TotalMass`` function:
-
-.. code-block:: python
-
-   def _TotalMass(data):
-       baryon_mass = data["cell_mass"].sum()
-       particle_mass = data["ParticleMassMsun"].sum()
-       return baryon_mass, particle_mass
-   def _combTotalMass(data, baryon_mass, particle_mass):
-       return baryon_mass.sum() + particle_mass.sum()
-   add_quantity("TotalMass", function=_TotalMass,
-                combine_function=_combTotalMass, n_ret = 2)
-
-Once the two functions have been defined, we then call :func:`add_quantity` to
-tell it the function that defines the data, the collator function, and the
-number of values that get passed between them.  In this case we return both the
-particle and the baryon mass, so we have two total values passed from the main
-function into the collator.
-
-.. _field_cuts:
-
-Cutting Objects by Field Values
--------------------------------
-
-Data objects can be cut by their field values using the ``cut_region`` 
-method.  For example, this could be used to compute the total gas mass within
-a certain temperature range, as in the following example.
-
-.. notebook-cell::
-
-   import yt
-   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = ds.all_data()
-   total_mass = ad.quantities.total_quantity('cell_mass')
-   # now select only gas with 1e5 K < T < 1e7 K.
-   new_region = ad.cut_region(['obj["temperature"] > 1e5',
-                               'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities.total_quantity('cell_mass')
-   print "The fraction of mass in this temperature range is %f." % \
-     (cut_mass / total_mass)
-
-The ``cut_region`` function generates a new object containing only the cells 
-that meet all of the specified criteria.  The sole argument to ``cut_region`` 
-is a list of strings, where each string is evaluated with an ``eval`` 
-statement.  ``eval`` is a native Python function that evaluates a string as 
-a Python expression.  Any type of data object can be cut with ``cut_region``.  
-Objects generated with ``cut_region`` can be used in the same way as all 
-other data objects.  For example, a cut region can be visualized by giving 
-it as a data_source to a projection.
-
-.. python-script::
-
-   import yt
-   ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = ds.all_data()
-   new_region = ad.cut_region(['obj["density"] > 1e-29'])
-   plot = yt.ProjectionPlot(ds, "x", "density", weight_field="density",
-                            data_source=new_region)
-   plot.save()
-
 .. _extracting-connected-sets:
 
-Connected Sets
---------------
+Connected Sets and Clump Finding
+--------------------------------
 
 The underlying machinery used in :ref:`clump_finding` is accessible from any
 data object.  This includes the ability to obtain and examine topologically
@@ -318,30 +430,6 @@
 contains :class:`~yt.data_objects.data_containers.AMRExtractedRegionBase`
 objects.  These can be queried just as any other data object.
 
-.. _extracting-isocontour-information:
-
-Extracting Isocontour Information
----------------------------------
-
-``yt`` contains an implementation of the `Marching Cubes
-<http://en.wikipedia.org/wiki/Marching_cubes>`_ algorithm, which can operate on
-3D data objects.  This provides two things.  The first is to identify
-isocontours and return either the geometry of those isocontours or to return
-another field value sampled along that isocontour.  The second piece of
-functionality is to calculate the flux of a field over an isocontour.
-
-Note that these isocontours are not guaranteed to be topologically connected.
-In fact, inside a given data object, the marching cubes algorithm will return
-all isocontours, not just a single connected one.  This means if you encompass
-two clumps of a given density in your data object and extract an isocontour at
-that density, it will include both of the clumps.
-
-To extract geometry or sample a field, call
-:meth:`~yt.data_objects.data_containers.AMR3DData.extract_isocontours`.  To
-calculate a flux, call
-:meth:`~yt.data_objects.data_containers.AMR3DData.calculate_isocontour_flux`.
-both of these operations will run in parallel.
-
 .. _object-serialization:
 
 Storing and Loading Objects
@@ -361,16 +449,14 @@
 has a separate set of serialization operations for 2D objects such as
 projections.
 
-.. _parameter_file_serialization:
-
-``yt`` will save out 3D objects to disk under the presupposition that the
+``yt`` will save out objects to disk under the presupposition that the
 construction of the objects is the difficult part, rather than the generation
 of the data -- this means that you can save out an object as a description of
 how to recreate it in space, but not the actual data arrays affiliated with
 that object.  The information that is saved includes the dataset off of
 which the object "hangs."  It is this piece of information that is the most
-difficult; the object, when reloaded, must be able to reconstruct a parameter
-file from whatever limited information it has in the save file.
+difficult; the object, when reloaded, must be able to reconstruct a dataset
+from whatever limited information it has in the save file.
 
 To do this, ``yt`` is able to identify datasets based on a "hash"
 generated from the base file name, the "CurrentTimeIdentifier", and the
@@ -379,45 +465,19 @@
 conjunction they should be uniquely identifying.  (This process is all done in
 :mod:`~yt.utilities.ParameterFileStorage` via :class:`~yt.utilities.ParameterFileStorage.ParameterFileStore`.)
 
-To save an object, you can either save it in the ``.yt`` file affiliated with
-the index or as a standalone file.  For instance, using
-:meth:`~yt.data_objects.index.save_object` we can save a sphere.
+You can save objects to an output file using the
+:meth:`~yt.data_objects.index.save_object` method:
 
 .. code-block:: python
 
    import yt
    ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
+   sp = ds.sphere([0.5, 0.5, 0.5], (10.0, 'kpc'))
+   sp.save_object("sphere_name", "save_file.cpkl")
 
-   ds.save_object(sp, "sphere_to_analyze_later")
-
-
-In a later session, we can load it using
-:meth:`~yt.data_objects.index.load_object`:
-
-.. code-block:: python
-
-   import yt
-
-   ds = yt.load("my_data")
-   sphere_to_analyze = ds.load_object("sphere_to_analyze_later")
-
-Additionally, if we want to store the object independent of the ``.yt`` file,
-we can save the object directly:
-
-.. code-block:: python
-
-   import yt
-
-   ds = yt.load("my_data")
-   sp = ds.sphere([0.5, 0.5, 0.5], 10.0/ds['kpc'])
-
-   sp.save_object("my_sphere", "my_storage_file.cpkl")
-
-This will store the object as ``my_sphere`` in the file
-``my_storage_file.cpkl``, which will be created or accessed using the standard
-python module :mod:`shelve`.  Note that if a filename is not supplied, it will
-be saved via the index, as above.
+This will store the object as ``sphere_name`` in the file
+``save_file.cpkl``, which will be created or accessed using the standard
+Python module :mod:`shelve`.
 
 To re-load an object saved this way, you can use the shelve module directly:
 
@@ -425,17 +485,12 @@
 
    import yt
    import shelve
+   ds = yt.load("my_data") 
+   saved_fn = shelve.open("save_file.cpkl")
+   ds, sp = saved_fn["sphere_name"]
 
-   ds = yt.load("my_data") # not necessary if storeparameterfiles is on
-
-   obj_file = shelve.open("my_storage_file.cpkl")
-   ds, obj = obj_file["my_sphere"]
-
-If you have turned on ``storeparameterfiles`` in your configuration,
-you won't need to load the parameterfile again, as the load process
-will actually do that for you in that case.  Additionally, we can
-store multiple objects in a single shelve file, so we have to call the
-sphere by name.
+Additionally, we can store multiple objects in a single shelve file, so we 
+have to call the sphere by name.
 
 .. note:: It's also possible to use the standard :mod:`cPickle` module for
           loading and storing objects -- so in theory you could even save a
@@ -443,4 +498,3 @@
 
 This method works for clumps, as well, and the entire clump index will be
 stored and restored upon load.
-

diff -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c -r 76f1cf5a7f78987525c183db3d64a0096a5a21f3 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ /dev/null
@@ -1,462 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:2faff88abc93fe2bc9d91467db786a8b69ec3ece6783a7055942ecc7c47a0817"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the past, querying a data object with a field name returned a NumPy `ndarray` . In the new unit system, data object queries will return a `YTArray`, a subclass of `ndarray` that preserves all of the nice properties of `ndarray`, including broadcasting, deep and shallow copies, and views. "
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Selecting data from an object"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`YTArray` is 'unit-aware'.  Let's show how this works in practice using a sample Enzo dataset:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')\n",
-      "          \n",
-      "dd = ds.all_data()\n",
-      "maxval, maxloc = ds.find_max('density')\n",
-      "\n",
-      "dens = dd['density']"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print maxval"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "mass = dd['cell_mass']\n",
-      "\n",
-      "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
-      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
-      "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
-      "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "dx = dd['dx']\n",
-      "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
-      "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
-      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
-      "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Unit conversions"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "YTArray defines several user-visible member functions that allow data to be converted from one unit system to another:\n",
-      "\n",
-      "* `in_units`\n",
-      "* `in_cgs`\n",
-      "* `in_mks`\n",
-      "* `convert_to_units`\n",
-      "* `convert_to_cgs`\n",
-      "* `convert_to_mks`"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The first method, `in_units`, returns a copy of the array in the units denoted by a string argument:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['density'].in_units('Msun/pc**3')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print (dd['pressure'])\n",
-      "print (dd['pressure']).in_cgs()\n",
-      "print (dd['pressure']).in_mks()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The next two methods do in-place conversions:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "dens = dd['density']\n",
-      "print dens\n",
-      "\n",
-      "dens.convert_to_units('Msun/pc**3')\n",
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "One possibly confusing wrinkle when using in-place conversions is if you try to query `dd['density']` again, you'll find that it has been converted to solar masses per cubic parsec:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['density']\n",
-      "\n",
-      "dens.convert_to_units('g/cm**3')\n",
-      "\n",
-      "print dens"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since the unit metadata is preserved and the array values are still correct in the new unit system, all numerical operations will still be correct.\n",
-      "\n",
-      "One of the nicest aspects of this new unit system is that the symbolic algebra for mathematical operations on data with units is performed automatically by sympy.  This example shows how we can construct a field with density units from two other fields that have units of mass and volume:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['cell_mass']\n",
-      "print dd['cell_volume'].in_units('cm**3')\n",
-      "\n",
-      "print (dd['cell_mass']/dd['cell_volume']).in_cgs()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Working with views and converting to ndarray"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "There are two ways to convert the data into a numpy array.  The most straightforward and safe way to do this is to create a copy of the array data.  The following cell demonstrates four equivalent ways of doing this, in increasing degree of terseness."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import numpy as np\n",
-      "\n",
-      "dens = dd['cell_mass']\n",
-      "\n",
-      "print dens.to_ndarray()\n",
-      "print np.array(dens)\n",
-      "print dens.value\n",
-      "print dens.v"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Since we have a copy of the data, we can mess with it however we wish without disturbing the original data returned by the yt data object."
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Another way to touch the raw array data is to get a _view_.  A numpy view is a lightweight array interface to a memory buffer. There are four ways to create views of YTArray instances:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print dd['cell_mass'].ndarray_view()\n",
-      "print dd['cell_mass'].view(np.ndarray)\n",
-      "print dd['cell_mass'].ndview\n",
-      "print dd['cell_mass'].d"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When working with views, rememeber that you are touching the raw array data and no longer have any of the unit checking provided by the unit system.  This can be useful where it might be more straightforward to treat the array as if it didn't have units but without copying the data."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "density_values = dd['density'].d\n",
-      "density_values[0:10] = 0\n",
-      "\n",
-      "# The original array was updated\n",
-      "print dd['density']"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Round-Trip Conversions to and from AstroPy's Units System"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Some examples of converting from AstroPy units to yt:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from astropy import units as u\n",
-      "from yt import YTQuantity, YTArray\n",
-      "\n",
-      "x = 42.0 * u.meter\n",
-      "y = YTQuantity.from_astropy(x) "
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print x, type(x)\n",
-      "print y, type(y)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "a = np.random.random(size=10) * u.km/u.s\n",
-      "b = YTArray.from_astropy(a)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print a, type(a)\n",
-      "print b, type(b)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "temp = dd[\"temperature\"]\n",
-      "atemp = temp.to_astropy()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print temp, type(temp)\n",
-      "print atemp, type(atemp)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "and quantities:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.utilities.physical_constants import kboltz\n",
-      "kb = kboltz.to_astropy()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "print kboltz, type(kboltz)\n",
-      "print kb, type(kb)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "As a sanity check, you can show that it works round-trip:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "k1 = kboltz.to_astropy()\n",
-      "k2 = YTQuantity.from_astropy(kb)\n",
-      "print k1 == k2"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "c = YTArray.from_astropy(a)\n",
-      "d = c.to_astropy()\n",
-      "print a == d"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


More information about the yt-svn mailing list