[yt-svn] commit/yt: 3 new changesets

commits-noreply at bitbucket.org
Tue Jul 29 23:57:04 PDT 2014


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6bf7153bf3f8/
Changeset:   6bf7153bf3f8
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-05-14 14:03:08
Summary:     Adding dengo and grackle fields to Enzo.
Affected #:  1 file

diff -r 4d1488b8fa476b1e6f5cb90c16312fce75abe34e -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -44,12 +44,23 @@
     'DI'      : 'D',
     'DII'     : 'D_p1',
     'HDI'     : 'HD',
-    'Electron': 'El'
+    'Electron': 'El',
+    'OI'      : 'O',
+    'OII'     : 'O_p1',
+    'OIII'    : 'O_p2',
+    'OIV'     : 'O_p3',
+    'OV'      : 'O_p4',
+    'OVI'     : 'O_p5',
+    'OVII'    : 'O_p6',
+    'OVIII'   : 'O_p7',
+    'OIX'     : 'O_p8',
 }
 
 class EnzoFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Cooling_Time", ("code_time", ["cooling_time"], None)),
+        ("Dengo_Cooling_Rate", ("erg/g/s", [], None)),
+        ("Grackle_Cooling_Rate", ("erg/s/cm**3", [], None)),
         ("HI_kph", ("1/code_time", [], None)),
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),
@@ -142,7 +153,8 @@
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.
-        if self.pf.parameters["MultiSpecies"] > 0:
+        if self.pf.parameters["MultiSpecies"] > 0 or \
+           self.pf.parameters.get("DengoChemistryModel", 0) == 1:
             self.setup_species_fields()
         self.setup_energy_field()
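
A minimal sketch of what this change enables, assuming an Enzo output run
with Dengo chemistry (the dataset path and derived field names below are
illustrative assumptions, not part of the commit):

import yt

ds = yt.load("DD0010/data0010")  # hypothetical Enzo output using Dengo
ad = ds.all_data()

# Species fields are now set up when DengoChemistryModel == 1, even if
# MultiSpecies == 0, and the new aliases map on-disk names like 'OVI'
# onto yt's species naming ('O_p5').
if ds.parameters.get("DengoChemistryModel", 0) == 1:
    print(ad["gas", "O_p5_density"])

# The two new cooling-rate fields carry the units declared above:
print(ad["enzo", "Dengo_Cooling_Rate"])    # erg/g/s
print(ad["enzo", "Grackle_Cooling_Rate"])  # erg/s/cm**3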
 


https://bitbucket.org/yt_analysis/yt/commits/532acb0e5d12/
Changeset:   532acb0e5d12
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-07-30 01:29:00
Summary:     Merging, fixing conflict
Affected #:  560 files

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -13,8 +13,13 @@
 drudd = drudd at uchicago.edu
 awetzel = andrew.wetzel at yale.edu
 David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins4096 = dcollins4096 at gmail.com
 dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
 jcforbes at ucsc.edu = jforbes at ucolick.org
+ngoldbau at ucsc.edu = goldbaum at ucolick.org
+biondo at wisc.edu = Biondo at wisc.edu
+samgeen at googlemail.com = samgeen at gmail.com
+fbogert = fbogert at ucsc.edu
\ No newline at end of file

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -7,6 +7,7 @@
 rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
@@ -41,6 +42,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -2,15 +2,23 @@
 
 Contributors:   
                 Tom Abel (tabel at stanford.edu)
-                David Collins (dcollins at physics.ucsd.edu)
+                Gabriel Altay (gabriel.altay at gmail.com)
+                Kenza Arraki (karraki at gmail.com)
+                Elliott Biondo (biondo at wisc.edu)
+                Alex Bogert (fbogert at ucsc.edu)
+                Pengfei Chen (madcpf at gmail.com)
+                David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
+                Miguel de Val-Borro (miguel.deval at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
                 John Forces (jforbes at ucolick.org)
+                Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
@@ -21,18 +29,23 @@
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
                 Chris Moody (cemoody at ucsc.edu)
+                Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
                 Jill Naiman (jnaiman at ucolick.org)
+                Desika Narayanan (dnarayan at haverford.edu)
                 Kaylea Nelson (kaylea.nelson at yale.edu)
                 Jeff Oishi (jsoishi at gmail.com)
+                Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
+                John Regan (john.regan at helsinki.fi)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
-                Devin Silvia (devin.silvia at colorado.edu)
+                Pat Shriwise (shriwise at wisc.edu)
+                Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
                 Britton Smith (brittonsmith at gmail.com)
@@ -42,8 +55,10 @@
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)
+                Michael S. Warren (mswarren at gmail.com)
                 Andrew Wetzel (andrew.wetzel at yale.edu)
                 John Wise (jwise at physics.gatech.edu)
+                Michael Zingale (michael.zingale at stonybrook.edu)
                 John ZuHone (jzuhone at gmail.com)
 
 Several items included in the yt/extern directory were written by other

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,4 +12,3 @@
 prune tests
 graft yt/gui/reason/html/resources
 exclude clean.sh .hgchurn
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -3,7 +3,7 @@
 \usepackage{calc}
 \usepackage{ifthen}
 \usepackage[landscape]{geometry}
-\usepackage[colorlinks = true, linkcolor=blue, citecolor=blue, urlcolor=blue]{hyperref}
+\usepackage[hyphens]{url}
 
 % To make this come out properly in landscape mode, do one of the following
 % 1.
@@ -101,9 +101,13 @@
 Documentation \url{http://yt-project.org/doc/index.html}.
 Need help? Start here \url{http://yt-project.org/doc/help/} and then
 try the IRC chat room \url{http://yt-project.org/irc.html},
-or the mailing list \url{http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org}.
-{\bf Installing yt:} The easiest way to install yt is to use the installation script
-found on the yt homepage or the docs linked above.
+or the mailing list \url{http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org}. \\
+
+\subsection{Installing yt} The easiest way to install yt is to use the
+installation script found on the yt homepage or the docs linked above.  If you
+already have python set up with \texttt{numpy}, \texttt{scipy},
+\texttt{matplotlib}, \texttt{h5py}, and \texttt{cython}, you can also use
+\texttt{pip install yt}
 
 \subsection{Command Line yt}
 yt, and its convenience functions, are launched from a command line prompt.
@@ -118,9 +122,8 @@
 \texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
 \texttt{yt update} \textemdash\ Update yt to most recent version.\\
 \texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
-\texttt{yt instinfo} \textemdash\ yt installation information. \\
+\texttt{yt version} \textemdash\ yt installation information. \\
 \texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt serve} ({\it dataset}) \textemdash\  Run yt-specific web GUI ({\it dataset} is optional).\\
 \texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
 \texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
 \texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
@@ -132,16 +135,8 @@
  paste.yt-project.org. \\ 
 \texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
  STDOUT. \\
- \texttt{yt hub\_register} \textemdash\ Register with
-hub.yt-project.org. \\
-\texttt{yt hub\_submit} \textemdash\ Submit hg repo to
-hub.yt-project.org. \\
-\texttt{yt bootstrap\_dev} \textemdash\ Bootstrap a yt 
-development environment. \\
 \texttt{yt bugreport} \textemdash\ Report a yt bug. \\
 \texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
-\texttt{yt rpdb} \textemdash\ Connect to running rpd 
- session. 
 
 \subsection{yt Imports}
 In order to use yt, Python must load the relevant yt modules into memory.
@@ -149,37 +144,40 @@
 used as part of a script.
 \newlength{\MyLen}
 \settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{from yt.mods import \textasteriskcentered}  \textemdash\ 
-Load base yt  modules. \\
+\texttt{import yt}  \textemdash\ 
+Load yt. \\
 \texttt{from yt.config import ytcfg}  \textemdash\ 
 Used to set yt configuration options.
- If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.api import \textasteriskcentered}   \textemdash\ 
-Load all yt analysis modules. \\
+If used, must be called before importing any other module.\\
 \texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
 Load halo finding modules. Other modules
 are loaded in a similar way by swapping the 
 {\em emphasized} text.
 See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
 
-\subsection{Numpy Arrays}
-Simulation data in yt is returned in Numpy arrays. The Numpy package provides a wealth of built-in
-functions that operate on Numpy arrays. Here is a very brief list of some useful ones.
-Please see \url{http://docs.scipy.org/doc/numpy/reference/} for the full
-numpy documentation.\\
-\settowidth{\MyLen}{\texttt{multicol} }
+\subsection{YTArray}
+Simulation data in yt is returned as a YTArray.  YTArray is a numpy array that
+has unit data attached to it and can automatically handle unit conversions and
+detect unit errors. Just like a numpy array, YTArray provides a wealth of
+built-in functions to calculate properties of the data in the array. Here is a
+very brief list of some useful ones.
+\settowidth{\MyLen}{\texttt{multicol} }\\
+\texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\ 
 \texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
-\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max, 
+\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
 min value of \texttt{a}.\\
 \texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from \texttt{a} between
+\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{a} between
 locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
-\texttt{sel = (a > const)}  \textemdash\ Create a new boolean Numpy array \texttt{sel}, of the same shape as \texttt{a},
+\texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
+\texttt{sel}, of the same shape as \texttt{a},
 that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
-\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of elements from \texttt{a} that correspond to elements of \texttt{sel}
+\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
+elements from \texttt{a} that correspond to elements of \texttt{sel}
 that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.dump({\it filename.dat})} \textemdash\ Save \texttt{a} to the binary file {\it filename.dat}.\\
-\texttt{a = np.load({\it filename.dat})} \textemdash\ Load the contents of {\it filename.dat} into \texttt{a}.
+\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
 
 \subsection{IPython Tips}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -196,6 +194,7 @@
 \texttt{\%hist} \textemdash\ Print recent command history.\\
 \texttt{\%quickref} \textemdash\ Print IPython quick reference.\\
 \texttt{\%pdb} \textemdash\ Automatically enter the Python debugger at an exception.\\
+\texttt{\%debug} \textemdash\ Drop into a debugger at the location of the last unhandled exception. \\
 \texttt{\%time, \%timeit} \textemdash\ Find running time of expressions for benchmarking.\\
 \texttt{\%lsmagic} \textemdash\ List all available IPython magics. Hint: \texttt{?} works with magics.\\
 
@@ -208,68 +207,52 @@
 After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
-\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field} into the
-numpy array \texttt{a}. Similarly for other data containers.\\
-\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
-\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+YTArray \texttt{a}. Similarly for other data containers.\\
+\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
-\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal},with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
- container. {\it constructor} is a list of pre-defined non-boolean 
- data containers with nested boolean logic using the
- ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
- {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
- by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
- 
-\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
-\subsection{Defining New Fields \& Quantities}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. Quantities reduce a field (e.g. "Density") defined over an object (e.g. "sphere") to get a single value (e.g. "Mass"). \\
-\texttt{def \_MetalMassMsun({\it field},{\it data})}\\
-\texttt{\hspace{4 mm} return data["Metallicity"]*data["CellMassMsun"]}\\
-\texttt{add\_field("MetalMassMsun",function=\_MetalMassMsun)}\\
-Define a new quantity; note the first function operates on grids and data objects and the second on the results of the first. \\
-\texttt{def \_TotalMass(data): }\\
-\texttt{\hspace{4 mm} baryon\_mass = data["CellMassMsun"].sum()}\\
-\texttt{\hspace{4 mm} particle\_mass = data["ParticleMassMsun"].sum()}\\
-\texttt{\hspace{4 mm} return baryon\_mass, particle\_mass}\\
-\texttt{def \_combTotalMass(data, baryon\_mass, particle\_mass):}\\
-\texttt{\hspace{4 mm} return baryon\_mass.sum() + particle\_mass.sum()}\\
-\texttt{add\_quantity("TotalMass", function=\_TotalMass,}\\
-\texttt{\hspace{4 mm} combine\_function=\_combTotalMass, n\_ret = 2)}\\
-
-
+\subsection{Defining New Fields}
+\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. 
+Field can either be created before a dataset is loaded using \texttt{add\_field}:
+\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
+\texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
+Or added to an existing dataset using \texttt{ds.add\_field}:
+\texttt{ds.add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
+{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
 \texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
-\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -299,51 +282,37 @@
 The \texttt{my\_plugins.py} file \textemdash\ Add functions, derived fields, constants, or other commonly-used Python code to yt.
 
 
-
-
 \subsection{Analysis Modules}
 \settowidth{\MyLen}{\texttt{multicol}}
 The import name for each module is listed at the end of each description (see \textbf{yt Imports}).
 
 \texttt{Absorption Spectrum} \textemdash\ (\texttt{absorption\_spectrum}). \\
 \texttt{Clump Finder} \textemdash\ Find clumps defined by density thresholds (\texttt{level\_sets}). \\
-\texttt{Coordinate Transformation} \textemdash\ (\texttt{coordinate\_transformation}). \\
 \texttt{Halo Finding} \textemdash\ Locate halos of dark matter particles (\texttt{halo\_finding}). \\
-\texttt{Halo Mass Function} \textemdash\ Find halo mass functions from data and from theory (\texttt{halo\_mass\_function}). \\
-\texttt{Halo Profiling} \textemdash\ Profile and project multiple halos (\texttt{halo\_profiler}). \\
-\texttt{Halo Merger Tree} \textemdash\ Create a database of halo mergers (\texttt{halo\_merger\_tree}). \\
 \texttt{Light Cone Generator} \textemdash\ Stitch datasets together to perform analysis over cosmological volumes. \\
 \texttt{Light Ray Generator} \textemdash\ Analyze the path of light rays.\\
-\texttt{Radial Column Density} \textemdash\ Calculate column densities around a point (\texttt{radial\_column\_density}). \\
 \texttt{Rockstar Halo Finding} \textemdash\ Locate halos of dark matter using the Rockstar halo finder (\texttt{halo\_finding.rockstar}). \\
 \texttt{Star Particle Analysis} \textemdash\ Analyze star formation history and assemble spectra (\texttt{star\_analysis}). \\
 \texttt{Sunrise Exporter} \textemdash\ Export data to the sunrise visualization format (\texttt{sunrise\_export}). \\
-\texttt{Two Point Functions} \textemdash\ Two point correlations (\texttt{two\_point\_functions}). \\
 
 
 \subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}}
-Nearly all of yt is parallelized using MPI.
-The {\it mpi4py} package must be installed for parallelism in yt.
-To install {\it pip install mpi4py} on the command line usually works.
+\settowidth{\MyLen}{\texttt{multicol}} 
+Nearly all of yt is parallelized using
+MPI.  The {\it mpi4py} package must be installed for parallelism in yt.  To
+install {\it pip install mpi4py} on the command line usually works.
 Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py --parallel}\\
-This command may differ for each system on which you use yt;
-please consult the system documentation for details on how to run parallel applications.
+{\it mpirun -n 12 python script.py}\\
+The file \texttt{script.py} must call the \texttt{yt.enable\_parallelism()} to
+turn on yt's parallelism.  If this doesn't happen, all cores will execute the
+same serial yt script.  This command may differ for each system on which you use
+yt; please consult the system documentation for details on how to run parallel
+applications.
 
-\texttt{from yt.pmods import *} \textemdash\ Load yt faster when in parallel.
-This replaces the usual \texttt{from yt.mods import *}.\\
 \texttt{parallel\_objects()} \textemdash\ A way to parallelize analysis over objects
 (such as halos or clumps).\\
 
 
-\subsection{Pre-Installed Versions}
-\settowidth{\MyLen}{\texttt{multicol}}
-yt is pre-installed on several supercomputer systems.
-
-\textbf{NICS Kraken} \textemdash\ {\it module load yt} \\
-
-
 \subsection{Mercurial}
 \settowidth{\MyLen}{\texttt{multicol}}
 Please see \url{http://mercurial.selenic.com/} for the full Mercurial documentation.
@@ -365,8 +334,7 @@
 \subsection{FAQ}
 \settowidth{\MyLen}{\texttt{multicol}}
 
-\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
-Must enter \texttt{pf.h} before this command. \\
+\texttt{slc.set\_log('field', False)} \textemdash\ When plotting \texttt{field}, use linear scaling instead of log scaling.
 
 
 %\rule{0.3\linewidth}{0.25pt}
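
To tie the cheatsheet changes above together, a short sketch of the yt-3.0
workflow the revised cheatsheet now documents (the sample dataset name is the
one used elsewhere in these docs; treat paths as illustrative):

import yt

ds = yt.load("Enzo_64/DD0043/data0043")   # reference a single snapshot
dd = ds.all_data()                        # select the entire volume

a = dd["density"]                         # a YTArray with units attached
print(a.in_cgs())                         # convert to CGS units
print(a.in_units("Msun/pc**3"))           # or any compatible unit

# Defining a new field with the yt-3.0 API shown in the cheatsheet:
def _metal_mass(field, data):
    return data["metallicity"] * data["cell_mass"]

ds.add_field("metal_mass", units="g", function=_metal_mass)

slc = yt.SlicePlot(ds, "z", "density")    # slice perpendicular to z
slc.set_log("density", False)             # linear scaling, per the FAQ entry
slc.save("my_slice")                      # save a png with this name prefix

# In a parallel run ("mpirun -n 12 python script.py"), the script must call
# yt.enable_parallelism() right after importing yt, or all cores will run
# the same serial script.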

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -49,7 +49,7 @@
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
- * Parameter files are a last resort.
+ * Parameter files external to yt are a last resort.
  * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
@@ -61,7 +61,7 @@
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
      parameters are now properties on a Dataset subclass: you access them
-     like pf.refine_by .
+     like ds.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality
      * TopGridDimensions => domain_dimensions
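
A quick illustration of the translation table above (any Enzo dataset; the
path is illustrative):

import yt

ds = yt.load("Enzo_64/DD0043/data0043")  # illustrative Enzo snapshot
print(ds.refine_by)           # Enzo's RefineBy
print(ds.dimensionality)      # Enzo's TopGridRank
print(ds.domain_dimensions)   # Enzo's TopGridDimensions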

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -73,7 +73,7 @@
     Examples
     --------
     These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
     a plot collection, 'c' for a center, and 'L' for a vector. 
 
     >>> a=[1,2,3]

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -19,7 +19,7 @@
 useful variable names that correspond to specific instances that the user is
 presupposed to have created.
 
-   * `pf`: a parameter file, loaded successfully
+   * `ds`: a dataset, loaded successfully
    * `sp`: a sphere
    * `c`: a 3-component "center"
    * `L`: a 3-component vector that corresponds to either angular momentum or a

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/helper_scripts/parse_cb_list.py
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/visualizing/_cb_docstrings.inc", "w")
 

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/helper_scripts/parse_dq_list.py
--- a/doc/helper_scripts/parse_dq_list.py
+++ b/doc/helper_scripts/parse_dq_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_dq_docstrings.inc", "w")
 
@@ -29,7 +29,7 @@
                             docstring = docstring))
                             #docstring = "\n".join(tw.wrap(docstring))))
 
-dd = pf.h.all_data()
+dd = ds.all_data()
 for n,func in sorted(dd.quantities.functions.items()):
     print n, func
     write_docstring(output, n, func[1])

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/helper_scripts/parse_object_list.py
--- a/doc/helper_scripts/parse_object_list.py
+++ b/doc/helper_scripts/parse_object_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_obj_docstrings.inc", "w")
 
@@ -27,7 +27,7 @@
     f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
                             docstring = 'physical-object-api'))
 
-for n,c in sorted(pf.h.__dict__.items()):
+for n,c in sorted(ds.__dict__.items()):
     if hasattr(c, '_con_args'):
         print n
         write_docstring(output, n, c)

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,15 +17,15 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
+Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
 .. notebook-cell::
 
   from yt.mods import *
-  pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.field_list):
+  ds = load("Enzo_64/DD0043/data0043")
+  for i in sorted(ds.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -567,8 +567,10 @@
 
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
-echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-get_ytdata xray_emissivity.h5
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  cloudy_emissivity.h5' > cloudy_emissivity.h5.sha512
+[ ! -e cloudy_emissivity.h5 ] && get_ytdata cloudy_emissivity.h5
+echo '0f714ae2eace0141b1381abf1160dc8f8a521335e886f99919caf3beb31df1fe271d67c7b2a804b1467949eb16b0ef87a3d53abad0e8160fccac1e90d8d9e85f  apec_emissivity.h5' > apec_emissivity.h5.sha512
+[ ! -e apec_emissivity.h5 ] && get_ytdata apec_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
@@ -586,7 +588,7 @@
 FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.1.3'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.1.0'
+IPYTHON='ipython-2.1.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
@@ -608,14 +610,13 @@
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca  ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
+echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
@@ -624,7 +625,6 @@
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
 echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
@@ -657,7 +657,6 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
-get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -816,6 +815,7 @@
         YT_DIR=`dirname $ORIG_PWD`
     elif [ ! -e yt-hg ]
     then
+        echo "Cloning yt"
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
@@ -824,9 +824,9 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-3.0-hg ] 
+    elif [ -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
@@ -943,14 +943,19 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar/done ]
+    if [ ! -e rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
-        cd Rockstar
+        if [ ! -e rockstar ]
+        then
+            ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+        fi
+        cd rockstar
+        ( hg pull 2>&1 ) 1>> ${LOG_FILE}
+        ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/_static/agogo_yt.css
--- a/doc/source/_static/agogo_yt.css
+++ /dev/null
@@ -1,41 +0,0 @@
-@import url("agogo.css");
-@import url("http://fonts.googleapis.com/css?family=Crimson+Text");
-@import url("http://fonts.googleapis.com/css?family=Droid+Sans");
-
-div.document ul {
-  margin-left: 1.5em;
-  margin-top: 0.0em;
-  margin-bottom: 1.0em;
-}
-
-div.document li.toctree-l1 {
-  margin-bottom: 0.5em;
-}
-
-table.contentstable {
-  width: 100%;
-}
-
-table.contentstable td {
-  padding: 5px 15px 0px 15px;
-}
-
-table.contentstable tr {
-  border-bottom: 1px solid black;
-}
-
-a.biglink {
-  line-height: 1.2em;
-}
-
-a tt.xref {
-  font-weight: bolder;
-}
-
-table.docutils {
-  width: 100%;
-}
-
-table.docutils td {
-  width: 50%;
-}

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/_static/custom.css
--- /dev/null
+++ b/doc/source/_static/custom.css
@@ -0,0 +1,8 @@
+blockquote {
+    font-size: 16px;
+    border-left: none;
+}
+
+dd {
+    margin-left: 30px;
+}
\ No newline at end of file

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/_templates/layout.html
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -35,3 +35,5 @@
     </div>
 {%- endblock %}
 
+{# Custom CSS overrides #}
+{% set bootswatch_css_custom = ['_static/custom.css'] %}

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/_dq_docstrings.inc
--- a/doc/source/analyzing/_dq_docstrings.inc
+++ /dev/null
@@ -1,165 +0,0 @@
-
-
-.. function:: Action(action, combine_action, filter=None):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._Action`.)
-   This function evals the string given by the action arg and uses 
-   the function thrown with the combine_action to combine the values.  
-   A filter can be thrown to be evaled to short-circuit the calculation 
-   if some criterion is not met.
-   :param action: a string containing the desired action to be evaled.
-   :param combine_action: the function used to combine the answers when done lazily.
-   :param filter: a string to be evaled to serve as a data filter.
-
-
-
-.. function:: AngularMomentumVector():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._AngularMomentumVector`.)
-   This function returns the mass-weighted average angular momentum vector.
-
-
-
-.. function:: BaryonSpinParameter():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._BaryonSpinParameter`.)
-   This function returns the spin parameter for the baryons, but it uses
-   the particles in calculating enclosed mass.
-
-
-
-.. function:: BulkVelocity():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._BulkVelocity`.)
-   This function returns the mass-weighted average velocity in the object.
-
-
-
-.. function:: CenterOfMass(use_cells=True, use_particles=False):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._CenterOfMass`.)
-   This function returns the location of the center
-   of mass. By default, it computes of the *non-particle* data in the object. 
-   
-   Parameters
-   ----------
-   
-   use_cells : bool
-       If True, will include the cell mass (default: True)
-   use_particles : bool
-       if True, will include the particles in the object (default: False)
-
-
-
-.. function:: Extrema(fields, non_zero=False, filter=None):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._Extrema`.)
-   This function returns the extrema of a set of fields
-   
-   :param fields: A field name, or a list of field names
-   :param filter: a string to be evaled to serve as a data filter.
-
-
-
-.. function:: IsBound(truncate=True, include_thermal_energy=False, treecode=True, opening_angle=1.0, periodic_test=False, include_particles=True):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._IsBound`.)
-   This returns whether or not the object is gravitationally bound. If this
-   returns a value greater than one, it is bound, and otherwise not.
-   
-   Parameters
-   ----------
-   truncate : Bool
-       Should the calculation stop once the ratio of
-       gravitational:kinetic is 1.0?
-   include_thermal_energy : Bool
-       Should we add the energy from ThermalEnergy
-       on to the kinetic energy to calculate 
-       binding energy?
-   treecode : Bool
-       Whether or not to use the treecode.
-   opening_angle : Float 
-       The maximal angle a remote node may subtend in order
-       for the treecode method of mass conglomeration may be
-       used to calculate the potential between masses.
-   periodic_test : Bool 
-       Used for testing the periodic adjustment machinery
-       of this derived quantity.
-   include_particles : Bool
-       Should we add the mass contribution of particles
-       to calculate binding energy?
-   
-   Examples
-   --------
-   >>> sp.quantities["IsBound"](truncate=False,
-   ... include_thermal_energy=True, treecode=False, opening_angle=2.0)
-   0.32493
-
-
-
-.. function:: MaxLocation(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._MaxLocation`.)
-   This function returns the location of the maximum of a set
-   of fields.
-
-
-
-.. function:: MinLocation(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._MinLocation`.)
-   This function returns the location of the minimum of a set
-   of fields.
-
-
-
-.. function:: ParticleSpinParameter():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._ParticleSpinParameter`.)
-   This function returns the spin parameter for the baryons, but it uses
-   the particles in calculating enclosed mass.
-
-
-
-.. function:: StarAngularMomentumVector():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._StarAngularMomentumVector`.)
-   This function returns the mass-weighted average angular momentum vector 
-   for stars.
-
-
-
-.. function:: TotalMass():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._TotalMass`.)
-   This function takes no arguments and returns the sum of cell masses and
-   particle masses in the object.
-
-
-
-.. function:: TotalQuantity(fields):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._TotalQuantity`.)
-   This function sums up a given field over the entire region
-   
-   :param fields: The fields to sum up
-
-
-
-.. function:: WeightedAverageQuantity(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._WeightedAverageQuantity`.)
-   This function returns an averaged quantity.
-   
-   :param field: The field to average
-   :param weight: The field to weight by
-
-.. function:: WeightedVariance(field, weight):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._WeightedVariance`.)
-    This function returns the variance of a field.
-
-    :param field: The target field
-    :param weight: The field to weight by
-
-    Returns the weighted variance and the weighted mean.
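
The yt-2.x derived-quantity docs removed above have yt-3.0 equivalents as
methods on a data object's quantities attribute; a hedged sketch (method
names per the yt-3.0 interface, shown here as an assumption rather than as
part of this commit):

import yt

ds = yt.load("Enzo_64/DD0043/data0043")
dd = ds.all_data()

print(dd.quantities.total_mass())               # was TotalMass()
print(dd.quantities.extrema("density"))         # was Extrema(fields)
print(dd.quantities.angular_momentum_vector())  # was AngularMomentumVector()
print(dd.quantities.center_of_mass())           # was CenterOfMass(...)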

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ /dev/null
@@ -1,150 +0,0 @@
-
-
-.. class:: boolean(self, regions, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
-
-
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, pf=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
-
-
-.. class:: cut_region(self, base_region, field_cuts, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.InLineExtractedRegionBase`.)
-
-
-.. class:: cutting(self, normal, center, fields=None, node_name=None, north_vector=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
-
-
-.. class:: disk(self, center, normal, radius, height, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
-
-
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
-
-
-.. class:: extracted_region(self, base_region, indices, force_refresh=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.ExtractedRegionBase`.)
-
-
-.. class:: fixed_res_cutting(self, normal, center, width, dims, fields=None, node_name=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
-
-
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
-
-
-.. class:: grid_collection(self, center, grid_list, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
-
-
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
-
-
-.. class:: inclined_box(self, origin, box_vectors, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
-
-
-.. class:: ortho_ray(self, axis, coords, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
-
-
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
-
-
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
-
-
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
-
-
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
-
-
-.. class:: ray(self, start_point, end_point, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
-
-
-.. class:: region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
-
-
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
-
-
-.. class:: slice(self, axis, coord, fields=None, center=None, pf=None, node_name=False, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
-
-
-.. class:: smoothed_covering_grid(self, *args, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
-
-
-.. class:: sphere(self, center, radius, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
-
-
-.. class:: streamline(self, positions, length=1.0, fields=None, pf=None, **field_parameters):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)
-
-
-.. class:: surface(self, data_source, surface_field, field_value):
-
-   For more information, see :ref:`physical-object-api`
-   (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSurfaceBase`.)

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:c423bcb9e3370a4581cbaaa8e764b95ec13e665aa3b46d452891d76cc79d7acf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -34,7 +35,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *\n",
+      "import yt\n",
       "from yt.analysis_modules.halo_analysis.api import *\n",
       "import tempfile\n",
       "import shutil\n",
@@ -44,7 +45,7 @@
       "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -62,7 +63,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+      "halos_ds = yt.load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -80,7 +81,7 @@
      "collapsed": false,
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
       "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -295,9 +296,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -390,12 +391,13 @@
      "input": [
       "%matplotlib inline\n",
       "import matplotlib.pyplot as plt\n",
+      "import numpy as np\n",
       "\n",
-      "plt.plot(radius, temperature)\n",
+      "plt.plot(np.array(radius), np.array(temperature))\n",
       "\n",
       "plt.semilogy()\n",
-      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
-      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n",
+      "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n",
       "\n",
       "plt.show()"
      ],
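
The keyword renames in this notebook (data_pf/halos_pf to data_ds/halos_ds)
in standalone form, using the same sample data the notebook loads; hc.create()
follows the notebook's workflow:

import yt
from yt.analysis_modules.halo_analysis.api import HaloCatalog

data_ds = yt.load("Enzo_64/RD0006/RedshiftOutput0006")
halos_ds = yt.load("rockstar_halos/halos_0.0.bin")

# HaloCatalog now takes *_ds keyword arguments rather than *_pf:
hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
hc.create()  # build the catalog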

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:56a8d72735e3cc428ff04b241d4b2ce6f653019818c6fc7a4148840d99030c85"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,19 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "\n",
+      "from yt.analysis_modules.ppv_cube.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +57,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,12 +98,32 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
-      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
+      "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
      "metadata": {},
@@ -97,7 +140,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
      ],
      "language": "python",
      "metadata": {},
@@ -146,7 +189,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +223,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "ds = yt.load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +245,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +260,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = ds.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +271,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = ds.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

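Stitched together, the reorganized cells above form a single runnable script. Everything here is copied from the hunks except the viewing normal L, which the notebook defines in a cell not shown in this diff, and the final write_fits call, which is inferred from the notebook's later yt.load("cube.fits"); both are stand-ins:

    import numpy as np
    import yt
    from yt.analysis_modules.ppv_cube.api import PPVCube

    # Disk model parameters, as in the notebook.
    nx, ny, nz = 256, 256, 256
    R = 10.      # outer radius of disk, kpc
    r_0 = 3.     # scale radius, kpc
    beta = 1.4   # tangential velocity profile parameter
    alpha = -1.  # radial density profile parameter

    x, y = np.mgrid[-R:R:nx*1j, -R:R:ny*1j]  # cartesian x-y plane of the disk
    r = np.sqrt(x*x + y*y)                   # polar radius
    theta = np.arctan2(y, x)                 # polar angle

    # Density and velocity arrays; all fields vanish outside the disk.
    dens = np.zeros((nx, ny, nz))
    dens[:, :, nz//2-3:nz//2+3] = (r**alpha).reshape(nx, ny, 1)
    vel_theta = r/(1. + (r/r_0)**beta)
    velx = np.zeros((nx, ny, nz))
    vely = np.zeros((nx, ny, nz))
    velx[:, :, nz//2-3:nz//2+3] = (-vel_theta*np.sin(theta)).reshape(nx, ny, 1)
    vely[:, :, nz//2-3:nz//2+3] = (vel_theta*np.cos(theta)).reshape(nx, ny, 1)
    for arr in (dens, velx, vely):
        arr[r > R] = 0.0

    data = {"density": (dens, "g/cm**3"),
            "velocity_x": (velx, "km/s"),
            "velocity_y": (vely, "km/s"),
            "velocity_z": (np.zeros((nx, ny, nz)), "km/s")}
    bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])
    ds = yt.load_uniform_grid(data, (nx, ny, nz),
                              length_unit=(2*R, "kpc"), nprocs=1, bbox=bbox)

    # Stand-in viewing normal (the notebook sets its own L).
    L = [0., np.sin(np.pi/4.), np.cos(np.pi/4.)]
    cube = PPVCube(ds, L, "density", dims=(200, 100, 50),
                   velocity_bounds=(-1.5, 1.5, "km/s"))
    cube.write_fits("cube.fits")  # the notebook reloads this with yt.load
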
diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e4b5ea69687eb79452c16385b3a6f795b4572518dfa7f9d8a8125bd75b5fea85"
+  "signature": "sha256:5ab80c6b33a115cb88c36fde8659434d14a852dd43b0b419f2bb0c04acf66278"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,7 +20,7 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
+      "import yt\n",
       "import glob\n",
       "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
@@ -77,7 +77,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(my_fns[0])\n",
+      "ds = yt.load(my_fns[0])\n",
       "dd = ds.all_data()\n",
       "indices = dd[\"particle_index\"].astype(\"int\")\n",
       "print indices"
@@ -205,8 +205,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "slc = SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
       "slc.show()"
      ],
      "language": "python",

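The same "from yt.mods import *" to "import yt" migration, pulled out of this notebook into one sketch; the glob pattern is hypothetical, and the ParticleTrajectories call is taken on faith from the notebook's own import:

    import glob
    import yt
    from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories

    # Hypothetical output pattern; the notebook globs an Enzo orbit test.
    my_fns = sorted(glob.glob("DD*/output_*"))

    ds = yt.load(my_fns[0])
    dd = ds.all_data()
    indices = dd["particle_index"].astype("int")

    # Track those particles across every output; trajectory fields are
    # then accessible dict-like from the resulting object.
    trajs = ParticleTrajectories(my_fns, indices)
    print trajs["particle_position_x"]
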
diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:e4db171b795d155870280ddbe8986f55f9a94ffb10783abf9d4cc2de3ec24894"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -89,14 +89,13 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import SZProjection\n",
+      "import yt\n",
+      "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +107,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +126,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +143,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,7 +215,7 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
     }
    ],

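The keywords called out in these hunks (mue, high_order, clobber) slot together as below; a sketch only, with an assumed output filename:

    import yt
    from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection

    ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")

    freqs = [90., 180., 240.]  # GHz
    szprj = SZProjection(ds, freqs, mue=1.143, high_order=True)

    # On-axis projection, 10 Mpc wide, centered on the density maximum.
    szprj.on_axis("z", center="max", width=(10.0, "Mpc"), nx=400)

    # clobber=True overwrites a previous file of the same name.
    szprj.write_fits("DD0046_sz.fits", clobber=True)
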
diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -35,7 +35,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import AbsorptionSpectrum
+  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
 

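To show the corrected import in context, a sketch of typical usage: the add_line argument order (label, field, wavelength in Angstroms, f-value, gamma, atomic mass) and the Lyman-alpha atomic data are assumptions from standard references, and lightray.h5 is a hypothetical input file:

    from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

    # 900-1800 Angstroms sampled in 10000 bins, as in the doc above.
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

    # Lyman-alpha: rest wavelength, oscillator strength, damping
    # constant, and atomic mass of hydrogen.
    sp.add_line("Lya", "H_number_density",
                1215.6700, 0.4164, 6.265e8, 1.00794)

    wavelength, flux = sp.make_spectrum("lightray.h5",
                                        output_file="spectrum.fits")
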
diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -2,185 +2,135 @@
 
 Clump Finding
 =============
-.. sectionauthor:: Britton Smith <britton.smith at colorado.edu>
 
-``yt`` has the ability to identify topologically disconnected structures based in a dataset using 
-any field available.  This is powered by a contouring algorithm that runs in a recursive 
-fashion.  The user specifies the initial data object in which the clump-finding will occur, 
-the field over which the contouring will be done, the upper and lower limits of the 
-initial contour, and the contour increment.
+The clump finder uses a contouring algorithm to identify topologically 
+disconnected structures within a dataset.  This works by first creating a 
+single contour over the full range of the contouring field, then continually 
+increasing the lower value of the contour until it reaches the maximum value 
+of the field.  As disconnected structures are identified as separate contours, 
+the routine continues recursively through each object, creating a hierarchy of 
+clumps.  Individual clumps can be kept or removed from the hierarchy based on 
+the result of user-specified functions, such as checking for gravitational 
+boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder begins by creating a single contour of the specified field over the entire 
-range given.  For every isolated contour identified in the initial iteration, contouring is 
-repeated with the same upper limit as before, but with the lower limit increased by the 
-specified increment.  This repeated for every isolated group until the lower limit is equal 
-to the upper limit.
+The clump finder requires a data container and a field over which the 
+contouring is to be performed.
 
-Often very tiny clumps can appear as groups of only a few cells that happen to be slightly 
-overdense (if contouring over density) with respect to the surrounding gas.  The user may 
-specify criteria that clumps must meet in order to be kept.  The most obvious example is 
-selecting only those clumps that are gravitationally bound.
+.. code:: python
 
-Once the clump-finder has finished, the user can write out a set of quantities for each clump in the 
-index.  Additional info items can also be added.  We also provide a recipe
-for finding clumps in :ref:`cookbook-find_clumps`.
+   import yt
+   from yt.analysis_modules.level_sets.api import *
 
-Treecode Optimization
----------------------
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.1
+   data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                         (8, 'kpc'), (1, 'kpc'))
 
-As mentioned above, the user has the option to limit clumps to those that are
-gravitationally bound.
-The correct and accurate way to calculate if a clump is gravitationally
-bound is to do the full double sum:
+   master_clump = Clump(data_source, ("gas", "density"))
 
-.. math::
+At this point, every isolated contour will be considered a clump, 
+whether this is physical or not.  Validator functions can be added to 
+determine if an individual contour should be considered a real clump.  
+These functions are specified with the ``Clump.add_validator`` function.  
+Currently, two validators exist: a minimum number of cells and gravitational 
+boundedness.
 
-  PE = \Sigma_{i=1}^N \Sigma_{j=i}^N \frac{G M_i M_j}{r_{ij}}
+.. code:: python
 
-where :math:`PE` is the gravitational potential energy of :math:`N` cells,
-:math:`G` is the
-gravitational constant, :math:`M_i` is the mass of cell :math:`i`, 
-and :math:`r_{ij}` is the distance
-between cell :math:`i` and :math:`j`.
-The number of calculations required for this calculation
-grows with the square of :math:`N`. Therefore, for large clumps with many cells, the
-test for boundedness can take a significant amount of time.
+   master_clump.add_validator("min_cells", 20)
 
-An effective way to greatly speed up this calculation with minimal error
-is to use the treecode approximation pioneered by
-`Barnes and Hut (1986) <http://adsabs.harvard.edu/abs/1986Natur.324..446B>`_.
-This method of calculating gravitational potentials works by
-grouping individual masses that are located close together into a larger conglomerated
-mass with a geometric size equal to the distribution of the individual masses.
-For a mass cell that is sufficiently distant from the conglomerated mass,
-the gravitational calculation can be made using the conglomerate, rather than
-each individual mass, which saves time.
+   master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-The decision whether or not to use a conglomerate depends on the accuracy control
-parameter ``opening_angle``. Using the small-angle approximation, a conglomerate
-may be used if its geometric size subtends an angle no greater than the
-``opening_angle`` upon the remote mass. The default value is
-``opening_angle = 1``, which gives errors well under 1%. A value of 
-``opening_angle = 0`` is identical to the full O(N^2) method, and larger values
-will speed up the calculation and sacrifice accuracy (see the figures below).
+As many validators as desired can be added, and a clump is kept only if all 
+of them return True.  If any return False, the clump is merged back into its 
+parent.  Custom validators can easily be added.  A validator function need 
+only accept a ``Clump`` object and return True or False.
 
-The treecode method is iterative. Conglomerates may themselves form larger
-conglomerates. And if a larger conglomerate does not meet the ``opening_angle``
-criterion, the smaller conglomerates are tested as well. This iteration of 
-conglomerates will
-cease once the level of the original masses is reached (this is what happens
-for all pair calculations if ``opening_angle = 0``).
+.. code:: python
 
-Below are some examples of how to control the usage of the treecode.
+   def _minimum_gas_mass(clump, min_mass):
+       return (clump["gas", "cell_mass"].sum() >= min_mass)
+   add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-This example will calculate the ratio of the potential energy to kinetic energy
-for a spherical clump using the treecode method with an opening angle of 2.
-The default opening angle is 1.0:
+The ``add_validator`` function adds the validator to a registry that can 
+be accessed by the clump finder.  Then, the validator can be added to the 
+clump finding just like the others.
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
-      treecode=True, opening_angle=2.0)
+.. code:: python
 
-This example will accomplish the same as the above, but will use the full
-N^2 method.
+   master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
-      treecode=False)
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
+and maximum of the contouring field, and the step size.  The lower value of the 
+contour will be continually multiplied by the step size.
 
-Here the treecode method is specified for clump finding (this is default).
-Please see the link above for the full example of how to find clumps (the
-trailing backslash is important!):
+.. code:: python
 
-.. code-block:: python
-  
-  function_name = 'self.data.quantities["IsBound"](truncate=True, \
-      include_thermal_energy=True, treecode=True, opening_angle=2.0) > 1.0'
-  master_clump = amods.level_sets.Clump(data_source, None, field,
-      function=function_name)
+   c_min = data_source["gas", "density"].min()
+   c_max = data_source["gas", "density"].max()
+   step = 2.0
+   find_clumps(master_clump, c_min, c_max, step)
 
-To turn off the treecode, of course one should turn treecode=False in the
-example above.
+After the clump finding has finished, the master clump will represent the top 
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+with its own ``children`` attribute, and so on.
 
-Treecode Speedup and Accuracy Figures
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+A number of helper routines exist for examining the clump hierarchy.
 
-Two datasets are used to make the three figures below. Each is a zoom-in
-simulation with high resolution in the middle with AMR, and then lower
-resolution static grids on the periphery. In this way they are very similar to
-a clump in a full-AMR simulation, where there are many AMR levels stacked
-around a density peak. One dataset has a total of 3 levels of AMR, and
-the other has 10 levels, but in other ways are very similar.
+.. code:: python
 
-The first figure shows the effect of varying the opening angle on the speed
-and accuracy of the treecode. The tests were performed using the L=10 
-dataset on a clump with approximately 118,000 cells. The speedup of up the
-treecode is in green, and the accuracy in blue, with the opening angle
-on the x-axis.
+   # Write a text file of the full hierarchy.
+   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
-With an ``opening_angle`` = 0, the accuracy is perfect, but the treecode is
-less than half as fast as the brute-force method. However, by an
-``opening_angle`` of 1, the treecode is now nearly twice as fast, with
-about 0.2% error. This trend continues to an ``opening_angle`` 8, where
-large opening angles have no effect due to geometry.
+   # Write a text file of only the leaf nodes.
+   write_clumps(master_clump, 0, "%s_clumps.txt" % ds)
 
-.. image:: _images/TreecodeOpeningAngleBig.png
-   :width: 450
-   :height: 400
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
 
-Note that the accuracy is always below 1. The treecode will always underestimate
-the gravitational binding energy of a clump.
+``Clump`` objects can be used like all other data containers.
 
-In this next figure, the ``opening_angle`` is kept constant at 1, but the
-number of cells is varied on the L=3 dataset by slowly expanding a spherical
-region of analysis. Up to about 100,000 cells,
-the treecode is actually slower than the brute-force method. This is due to
-the fact that with fewer cells, smaller geometric distances,
-and a shallow AMR index, the treecode
-method has very little chance to be applied. The calculation is overall
-slower due to the overhead of the treecode method & startup costs. This
-explanation is further strengthened by the fact that the accuracy of the
-treecode method stay perfect for the first couple thousand cells, indicating
-that the treecode method is not being applied over that range.
+.. code:: python
 
-Once the number of cells gets high enough, and the size of the region becomes
-large enough, the treecode method can work its magic and the treecode method
-becomes advantageous.
+   print leaf_clumps[0]["gas", "density"]
+   print leaf_clumps[0].quantities.total_mass()
 
-.. image:: _images/TreecodeCellsSmall.png
-   :width: 450
-   :height: 400
+The writing functions will write out a series of properties about each 
+clump by default.  Additional properties can be appended with the 
+``Clump.add_info_item`` function.
 
-The saving grace to the figure above is that for small clumps, a difference of
-50% in calculation time is on the order of a second or less, which is tiny
-compared to the minutes saved for the larger clumps where the speedup can
-be greater than 3.
+.. code:: python
 
-The final figure is identical to the one above, but for the L=10 dataset.
-Due to the higher number of AMR levels, which translates into more opportunities
-for the treecode method to be applied, the treecode becomes faster than the
-brute-force method at only about 30,000 cells. The accuracy shows a different
-behavior, with a dip and a rise, and overall lower accuracy. However, at all
-times the error is still well under 1%, and the time savings are significant.
+   master_clump.add_info_item("total_cells")
 
-.. image:: _images/TreecodeCellsBig.png
-   :width: 450
-   :height: 400
+Just like the validators, custom info items can be added by defining functions 
+that minimally accept a ``Clump`` object and return a string to be printed.
 
-The figures above show that the treecode method is generally very advantageous,
-and that the error introduced is minimal.
+.. code:: python
+
+   def _mass_weighted_jeans_mass(clump):
+       jeans_mass = clump.data.quantities.weighted_average_quantity(
+           "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+       return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+   add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+Then, add it to the list:
+
+.. code:: python
+
+   master_clump.add_info_item("mass_weighted_jeans_mass")
+
+By default, the following info items are activated: **total_cells**, 
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
+**max_grid_level**, **min_number_density**, **max_number_density**, and 
+**distance_to_main_clump**.
+
+Clumps can be visualized using the ``annotate_clumps`` callback.
+
+.. code:: python
+
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+                           center='c', width=(20,'kpc'))
+   prj.annotate_clumps(leaf_clumps)
+   prj.save('clumps')

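Since each ``Clump`` carries a ``children`` attribute, the hierarchy described above can be walked with a few lines of recursion; a sketch, guarding against leaves whose ``children`` may be empty or None:

    def print_clump_tree(clump, level=0):
        # Depth-first walk: one line per clump, indented by depth.
        print "%s%s" % ("  " * level, clump)
        for child in clump.children or []:
            print_clump_tree(child, level + 1)

    print_clump_tree(master_clump)
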
diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,8 +58,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(pf)
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters
@@ -69,8 +69,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  haloes = LoadHaloes(pf, 'MyHaloList')
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
 object "haloes", and then loop over the list of haloes and do
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.ellipsoid(ell_param[0],
+  ell = ds.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -8,6 +8,8 @@
    :maxdepth: 1
 
    halo_catalogs
+   halo_transition
    halo_finding
    halo_mass_function
+   halo_merger_tree
    halo_analysis_example

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -1,3 +1,5 @@
+.. _halo-analysis-example:
+
 Using HaloCatalogs to do Analysis
 ---------------------------------
 

diff -r 6bf7153bf3f874d05959f8b69c52df18c2a97ab2 -r 532acb0e5d1258721908735c027e3495c80dfbde doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,3 +1,4 @@
+.. _halo_catalog:
 
 Creating Halo Catalogs
 ======================
@@ -7,9 +8,11 @@
 together into a single framework. This framework is substantially
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
+For a direct translation of various halo analysis tasks from yt-2.x
+to yt-3.0, please see :ref:`halo-transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
-catalog through data_pf. These halos can be found using friends-of-friends,
+catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
 use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
 details on the relative differences between these halo finders see 
@@ -19,32 +22,32 @@
 
    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
 HOP finders. Even though rockstar creates one file per processor, 
 specifying any one file allows the full catalog to be loaded. Here we 
 only specify the file output by the processor with ID 0. Note that the 
-argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
 any new information about the identified halos. To associate the halos 
 with the dataset from which they were found, supply arguments to both 
-halos_pf and data_pf.
+halos_ds and data_ds.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -215,8 +218,8 @@
 
 .. code-block:: python
 
-   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-   hc = HaloCatalog(halos_pf=hpf,
+   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")
@@ -226,4 +229,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

This diff is so big that we needed to truncate the remainder.
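
Despite the truncation, the renamed keywords above are enough to reconstruct the basic workflow; a sketch, with hypothetical paths and the per-halo sphere callback from the yt-3.0 halo analysis docs:

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    halos_ds = yt.load('rockstar_halos/halos_0.0.bin')

    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
    hc.add_callback("sphere", factor=2.0)  # a sphere around each halo
    hc.create()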

https://bitbucket.org/yt_analysis/yt/commits/c259c6a5f8ac/
Changeset:   c259c6a5f8ac
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-30 08:56:57
Summary:     Merged in MatthewTurk/yt/yt-3.0 (pull request #903)

Adding dengo and grackle fields to Enzo.
Affected #:  1 file

diff -r d2c88a547e71b223491ca4a00fda7bc783eb2c3a -r c259c6a5f8ac42834e77c4056bdb635de93620c8 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -44,12 +44,23 @@
     'DI'      : 'D',
     'DII'     : 'D_p1',
     'HDI'     : 'HD',
-    'Electron': 'El'
+    'Electron': 'El',
+    'OI'      : 'O',
+    'OII'     : 'O_p1',
+    'OIII'    : 'O_p2',
+    'OIV'     : 'O_p3',
+    'OV'      : 'O_p4',
+    'OVI'     : 'O_p5',
+    'OVII'    : 'O_p6',
+    'OVIII'   : 'O_p7',
+    'OIX'     : 'O_p8',
 }
 
 class EnzoFieldInfo(FieldInfoContainer):
     known_other_fields = (
         ("Cooling_Time", ("s", ["cooling_time"], None)),
+        ("Dengo_Cooling_Rate", ("erg/g/s", [], None)),
+        ("Grackle_Cooling_Rate", ("erg/s/cm**3", [], None)),
         ("HI_kph", ("1/code_time", [], None)),
         ("HeI_kph", ("1/code_time", [], None)),
         ("HeII_kph", ("1/code_time", [], None)),
@@ -147,9 +158,10 @@
         # Now we conditionally load a few other things.
         params = self.ds.parameters
         multi_species = params.get("MultiSpecies", None)
+        dengo = params.get("DengoChemistryModel", 0)
         if multi_species is None:
             multi_species = params["Physics"]["AtomicPhysics"]["MultiSpecies"]
-        if multi_species > 0:
+        if multi_species > 0 or dengo == 1:
             self.setup_species_fields()
         self.setup_energy_field()

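The new oxygen entries follow yt's element_pN naming, where N is the number of electrons removed (OI is neutral O; OIX, with all eight electrons gone, is O_p8). A throwaway check of that correspondence — the dict is copied from the hunk, while the roman-numeral parser is purely illustrative:

    known_species_names = {
        'OI': 'O', 'OII': 'O_p1', 'OIII': 'O_p2', 'OIV': 'O_p3',
        'OV': 'O_p4', 'OVI': 'O_p5', 'OVII': 'O_p6', 'OVIII': 'O_p7',
        'OIX': 'O_p8',
    }

    roman = {'I': 1, 'V': 5, 'X': 10}

    def ionization_state(name):
        # Parse the trailing roman numeral: OVI -> 6, i.e. 5 electrons removed.
        digits = [roman[c] for c in name[1:]]
        total = sum(-d if i + 1 < len(digits) and digits[i + 1] > d else d
                    for i, d in enumerate(digits))
        return total - 1

    for name, target in sorted(known_species_names.items()):
        state = ionization_state(name)
        suffix = '_p%d' % state if state else ''
        assert 'O' + suffix == target
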
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


