[yt-svn] commit/yt: 45 new changesets

commits-noreply at bitbucket.org
Tue Jul 29 13:13:23 PDT 2014


45 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7d4c148ba9d0/
Changeset:   7d4c148ba9d0
Branch:      yt-3.0
User:        bcrosby
Date:        2014-03-25 02:59:34
Summary:     Halo mass function has been reworked to accept halo dataset objects rather than halo output files.
Affected #:  1 file

diff -r bf4b8d5911ea438b461a0df59f17c2260262feb9 -r 7d4c148ba9d0ef64c2ac39a4fae37115bfe03561 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -21,18 +21,24 @@
     ParallelDummy, \
     ParallelAnalysisInterface, \
     parallel_blocking_call
+'''
 from yt.utilities.physical_constants import \
     cm_per_mpc, \
     mass_sun_cgs, \
     rho_crit_now
-
+'''
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
     Initalize a HaloMassFcn object to analyze the distribution of haloes
     as a function of mass.
-    :param halo_file (str): The filename of the output of the Halo Profiler.
+    :param ds (str): The loaded simulation dataset.
     Default=None.
+    :param halos_ds (str): The loaded halo dataset.
+    Default=None.
+    :param make_analytic (bool): Are we going to calculate an analytic mass
+    function, True for yes, False for no.
+    Default=False.
     :param omega_matter0 (float): The fraction of the universe made up of
     matter (dark and baryonic). Default=None.
     :param omega_lambda0 (float): The fraction of the universe made up of
@@ -67,16 +73,14 @@
     1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
     5 = Tinker
     Default=4.
-    :param mass_column (int): The column of halo_file that contains the
-    masses of the haloes. Default=4.
     """
-    def __init__(self, pf, halo_file=None, omega_matter0=None, omega_lambda0=None,
-    omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
-    this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
-    fitting_function=4, mass_column=5):
+    def __init__(self, ds=None, halos_ds=None, make_analytic=False, omega_matter0=None, 
+    omega_lambda0=None, omega_baryon0=0.05, hubble0=None, sigma8input=0.86, 
+    primordial_index=1.0, this_redshift=None, log_mass_min=None, log_mass_max=None, 
+    num_sigma_bins=360, fitting_function=4):
         ParallelAnalysisInterface.__init__(self)
-        self.pf = pf
-        self.halo_file = halo_file
+        self.ds = ds
+        self.halos_ds = halos_ds
         self.omega_matter0 = omega_matter0
         self.omega_lambda0 = omega_lambda0
         self.omega_baryon0 = omega_baryon0
@@ -88,30 +92,30 @@
         self.log_mass_max = log_mass_max
         self.num_sigma_bins = num_sigma_bins
         self.fitting_function = fitting_function
-        self.mass_column = mass_column
-        
-        # Determine the run mode.
-        if halo_file is None:
-            # We are hand-picking our various cosmological parameters
-            self.mode = 'single'
-        else:
-            # Make the fit using the same cosmological parameters as the dataset.
-            self.mode = 'haloes'
-            self.omega_matter0 = self.pf.omega_matter
-            self.omega_lambda0 = self.pf.omega_lambda
-            self.hubble0 = self.pf.hubble_constant
-            self.this_redshift = self.pf.current_redshift
-            self.read_haloes()
-            if self.log_mass_min == None:
-                self.log_mass_min = math.log10(min(self.haloes))
-            if self.log_mass_max == None:
-                self.log_mass_max = math.log10(max(self.haloes))
+        self.make_analytic = make_analytic
 
-        # Input error check.
-        if self.mode == 'single':
+        """
+        If we want to make an analytic mass function, grab what we can from either the 
+        halo file or the data set, and make sure that the user supplied everything else
+        that is needed.
+        """
+        if make_analytic == True:
+            # First try to get it from the ds
+            if ds is not None:
+                self.omega_matter0 = self.ds.omega_matter
+                self.omega_lambda0 = self.ds.omega_lambda
+                self.hubble0 = self.ds.hubble_constant
+                self.this_redshift = self.ds.current_redshift
+            # If we can't do that, try to get it from the halos_ds
+            if ds is None and halos_ds is not None:
+                self.omega_matter0 = self.halos_ds.omega_matter
+                self.omega_lambda0 = self.halos_ds.omega_lambda
+                self.hubble0 = self.halos_ds.hubble_constant
+                self.this_redshift = self.halos_ds.current_redshift
+            # Check that all the parameters for the analytic function have been set
             if omega_matter0 == None or omega_lambda0 == None or \
             hubble0 == None or this_redshift == None or log_mass_min == None or\
-            log_mass_max == None:
+            log_mass_max == None:            
                 mylog.error("All of these parameters need to be set:")
                 mylog.error("[omega_matter0, omega_lambda0, \
                 hubble0, this_redshift, log_mass_min, log_mass_max]")
@@ -119,21 +123,15 @@
                 omega_lambda0, hubble0, this_redshift,\
                 log_mass_min, log_mass_max))
                 return None
-        
-        # Poke the user to make sure they're doing it right.
-        mylog.info(
+            # Do the calculations.
+            self.sigmaM()
+            self.dndm()
+
         """
-        Please make sure these are the correct values! They are
-        not stored in enzo datasets, so must be entered by hand.
-        sigma8input=%f primordial_index=%f omega_baryon0=%f
-        """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        
-        # Do the calculations.
-        self.sigmaM()
-        self.dndm()
-        
-        if self.mode == 'haloes':
-            self.bin_haloes()
+        If a halo file has been supplied, make a mass function for the simulated halos.
+        """
+        if halos_ds is not None:
+            self.create_sim_hmf()
 
     def write_out(self, prefix='HMF', fit=True, haloes=True):
         """
@@ -194,20 +192,24 @@
         f.close()
         self.haloes = np.array(self.haloes)
 
-    def bin_haloes(self):
-        """
-        With the list of virial masses, find the halo mass function.
-        """
-        bins = np.logspace(self.log_mass_min,
-            self.log_mass_max,self.num_sigma_bins)
-        avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = np.histogram(self.haloes,bins)
-        # add right to left
-        for i,b in enumerate(dis):
-            dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
-            if i == (self.num_sigma_bins - 3): break
-
-        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
+    """
+    Here's where we create the halo mass functions from simulated halos
+    """
+    def create_sim_hmf(self):
+        data_source = self.halos_ds.all_data()
+        # We're going to use indices to count the number of halos above a given mass
+        masses_sim = np.sort(data_source['ParticleMassMsun'])
+        # Determine the size of the simulation volume in (Mpc/h)**3
+        sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
+        # Get rid of the densities that correspond to repeated halo masses
+        dn_dM_sim = np.arange(len(masses_sim),0,-1)
+        # We don't want repeated halo masses, and the uniques indices tell us which 
+        # densities are representative.
+        self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
+        # Now make this an actual number density
+        self.dn_dM_sim = dn_dM_sim[unique_indices]/sim_volume
+        # masses_sim and dn_dM_sim are now set, but remember that the log10 quantities
+        # are what is usually plotted for a halo mass function.
 
     def sigmaM(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/e954e71c0c20/
Changeset:   e954e71c0c20
Branch:      yt-3.0
User:        bcrosby
Date:        2014-03-25 19:47:55
Summary:     Forgot to convert length from comoving Mpc to comoving Mpc/h
Affected #:  1 file

diff -r 7d4c148ba9d0ef64c2ac39a4fae37115bfe03561 -r e954e71c0c2053a4c7235fb99e565d68e71f7790 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -200,7 +200,7 @@
         # We're going to use indices to count the number of halos above a given mass
         masses_sim = np.sort(data_source['ParticleMassMsun'])
         # Determine the size of the simulation volume in (Mpc/h)**3
-        sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
+        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')/self.halos_ds.hubble_constant).prod()
         # Get rid of the densities that correspond to repeated halo masses
         dn_dM_sim = np.arange(len(masses_sim),0,-1)
         # We don't want repeated halo masses, and the uniques indices tell us which 
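
For reference, the unit identity behind this fix: 1 Mpc/h = (1/h) Mpc, so a length of x comoving Mpc equals x*h comoving Mpc/h, and volumes in the two systems differ by a factor of h**3. A quick standalone check (h = 0.7 is an assumed value):

    import numpy as np

    h = 0.7                                    # assumed dimensionless Hubble parameter
    width_mpc = np.array([50.0, 50.0, 50.0])   # domain width in comoving Mpc

    volume_mpc3 = width_mpc.prod()             # comoving Mpc**3
    volume_mpch3 = (width_mpc * h).prod()      # comoving (Mpc/h)**3
    assert np.isclose(volume_mpch3, volume_mpc3 * h**3)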


https://bitbucket.org/yt_analysis/yt/commits/7409a134aa53/
Changeset:   7409a134aa53
Branch:      yt-3.0
User:        bcrosby
Date:        2014-03-25 21:35:03
Summary:     rho_crit now uses a units-aware value; switched back to calculating the HMF for simulated halos in comoving Mpc**3 rather than comoving (Mpc/h)**3 to be consistent with the output from the analytic calculation.
Affected #:  2 files

diff -r e954e71c0c2053a4c7235fb99e565d68e71f7790 -r 7409a134aa53524b933e8b7779a1bda4e8c5d811 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -21,12 +21,8 @@
     ParallelDummy, \
     ParallelAnalysisInterface, \
     parallel_blocking_call
-'''
-from yt.utilities.physical_constants import \
-    cm_per_mpc, \
-    mass_sun_cgs, \
-    rho_crit_now
-'''
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
@@ -113,6 +109,7 @@
                 self.hubble0 = self.halos_ds.hubble_constant
                 self.this_redshift = self.halos_ds.current_redshift
             # Check that all the parameters for the analytic function have been set
+            '''
             if omega_matter0 == None or omega_lambda0 == None or \
             hubble0 == None or this_redshift == None or log_mass_min == None or\
             log_mass_max == None:            
@@ -123,6 +120,7 @@
                 omega_lambda0, hubble0, this_redshift,\
                 log_mass_min, log_mass_max))
                 return None
+            '''
             # Do the calculations.
             self.sigmaM()
             self.dndm()
@@ -133,6 +131,25 @@
         if halos_ds is not None:
             self.create_sim_hmf()
 
+    """
+    Here's where we create the halo mass functions from simulated halos
+    """
+    def create_sim_hmf(self):
+        data_source = self.halos_ds.all_data()
+        # We're going to use indices to count the number of halos above a given mass
+        masses_sim = np.sort(data_source['ParticleMassMsun'])
+        # Determine the size of the simulation volume in comoving Mpc**3
+        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')).prod()
+        # Get rid of the densities that correspond to repeated halo masses
+        dn_dM_sim = np.arange(len(masses_sim),0,-1)
+        # We don't want repeated halo masses, and the uniques indices tell us which 
+        # densities are representative.
+        self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
+        # Now make this an actual number density
+        self.dn_dM_sim = dn_dM_sim[unique_indices]/sim_volume
+        # masses_sim and dn_dM_sim are now set, but remember that the log10 quantities
+        # are what is usually plotted for a halo mass function.
+
     def write_out(self, prefix='HMF', fit=True, haloes=True):
         """
         Writes out the halo mass functions to file(s) with prefix *prefix*.
@@ -169,47 +186,6 @@
                 self.dis[i])
                 fp.write(line)
             fp.close()
-        
-    def read_haloes(self):
-        """
-        Read in the virial masses of the haloes.
-        """
-        mylog.info("Reading halo masses from %s" % self.halo_file)
-        f = open(self.halo_file,'r')
-        line = f.readline()
-        if line == "":
-            self.haloes = np.array([])
-            return
-        while line[0] == '#':
-            line = f.readline()
-        self.haloes = []
-        while line:
-            line = line.split()
-            mass = float(line[self.mass_column])
-            if mass > 0:
-                self.haloes.append(float(line[self.mass_column]))
-            line = f.readline()
-        f.close()
-        self.haloes = np.array(self.haloes)
-
-    """
-    Here's where we create the halo mass functions from simulated halos
-    """
-    def create_sim_hmf(self):
-        data_source = self.halos_ds.all_data()
-        # We're going to use indices to count the number of halos above a given mass
-        masses_sim = np.sort(data_source['ParticleMassMsun'])
-        # Determine the size of the simulation volume in (Mpc/h)**3
-        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')/self.halos_ds.hubble_constant).prod()
-        # Get rid of the densities that correspond to repeated halo masses
-        dn_dM_sim = np.arange(len(masses_sim),0,-1)
-        # We don't want repeated halo masses, and the uniques indices tell us which 
-        # densities are representative.
-        self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
-        # Now make this an actual number density
-        self.dn_dM_sim = dn_dM_sim[unique_indices]/sim_volume
-        # masses_sim and dn_dM_sim are now set, but remember that the log10 quantities
-        # are what is usually plotted for a halo mass function.
 
     def sigmaM(self):
         """
@@ -257,8 +233,9 @@
         sigma_normalization = self.sigma8input / sigma8_unnorm;
 
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
+               .in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()       
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -294,8 +271,10 @@
         
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-                rho_crit_now * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
+               .in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()
+        print rho0
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density

diff -r e954e71c0c2053a4c7235fb99e565d68e71f7790 -r 7409a134aa53524b933e8b7779a1bda4e8c5d811 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -79,6 +79,7 @@
 # flux
 jansky_cgs = 1.0e-23
 # Cosmological constants
+# Calculated with H = 100 km/s/Mpc
 rho_crit_g_cm3_h2 = 1.8788e-29
 
 # Misc. Approximations
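
A sketch of the units-aware rho_crit conversion this changeset introduces, using the YTQuantity import that appears in the yt/__init__.py diff later in this digest; omega_matter0 = 0.27 is an assumed value:

    from yt.units.yt_array import YTQuantity

    rho_crit_g_cm3_h2 = 1.8788e-29   # critical density for H = 100 km/s/Mpc, g/cm**3
    omega_matter0 = 0.27             # assumed matter density parameter

    # rho0 in h**2 Msun/Mpc**3; the unit system handles the g->Msun and cm->Mpc factors.
    rho0 = YTQuantity(omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
           .in_units('Msun/Mpc**3')
    print rho0   # roughly 7.5e10 (the h**2 stays implicit, as in the module)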


https://bitbucket.org/yt_analysis/yt/commits/e93bc0139d69/
Changeset:   e93bc0139d69
Branch:      yt-3.0
User:        bcrosby
Date:        2014-03-25 23:02:37
Summary:     Fixed some units, clarified a few variable names, and corrected the spelling of haloes to halos.
Affected #:  1 file

diff -r 7409a134aa53524b933e8b7779a1bda4e8c5d811 -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -26,8 +26,8 @@
 
 class HaloMassFcn(ParallelAnalysisInterface):
     """
-    Initalize a HaloMassFcn object to analyze the distribution of haloes
-    as a function of mass.
+    Initalize a HaloMassFcn object to analyze the distribution of halos as 
+    a function of mass.
     :param ds (str): The loaded simulation dataset.
     Default=None.
     :param halos_ds (str): The loaded halo dataset.
@@ -109,18 +109,16 @@
                 self.hubble0 = self.halos_ds.hubble_constant
                 self.this_redshift = self.halos_ds.current_redshift
             # Check that all the parameters for the analytic function have been set
-            '''
-            if omega_matter0 == None or omega_lambda0 == None or \
-            hubble0 == None or this_redshift == None or log_mass_min == None or\
-            log_mass_max == None:            
+            if self.omega_matter0 == None or self.omega_lambda0 == None or \
+            self.hubble0 == None or self.this_redshift == None or \
+            self.log_mass_min == None or self.log_mass_max == None:            
                 mylog.error("All of these parameters need to be set:")
                 mylog.error("[omega_matter0, omega_lambda0, \
-                hubble0, this_redshift, log_mass_min, log_mass_max]")
-                mylog.error("[%s,%s,%s,%s,%s,%s]" % (omega_matter0,\
-                omega_lambda0, hubble0, this_redshift,\
-                log_mass_min, log_mass_max))
+hubble0, this_redshift, log_mass_min, log_mass_max]")
+                mylog.error("[%s,%s,%s,%s,%s,%s]" % (self.omega_matter0,\
+                self.omega_lambda0, self.hubble0, self.this_redshift,\
+                self.log_mass_min, self.log_mass_max))
                 return None
-            '''
             # Do the calculations.
             self.sigmaM()
             self.dndm()
@@ -129,6 +127,7 @@
         If a halo file has been supplied, make a mass function for the simulated halos.
         """
         if halos_ds is not None:
+            self.make_simulated=True
             self.create_sim_hmf()
 
     """
@@ -141,28 +140,28 @@
         # Determine the size of the simulation volume in comoving Mpc**3
         sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')).prod()
         # Get rid of the densities that correspond to repeated halo masses
-        dn_dM_sim = np.arange(len(masses_sim),0,-1)
+        n_cumulative_sim = np.arange(len(masses_sim),0,-1)
         # We don't want repeated halo masses, and the uniques indices tell us which 
         # densities are representative.
         self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
         # Now make this an actual number density
-        self.dn_dM_sim = dn_dM_sim[unique_indices]/sim_volume
-        # masses_sim and dn_dM_sim are now set, but remember that the log10 quantities
+        self.n_cumulative_sim = n_cumulative_sim[unique_indices]/sim_volume
+        # masses_sim and n_cumulative_sim are now set, but remember that the log10 quantities
         # are what is usually plotted for a halo mass function.
 
-    def write_out(self, prefix='HMF', fit=True, haloes=True):
+    def write_out(self, prefix='HMF', analytic=True, simulated=True):
         """
         Writes out the halo mass functions to file(s) with prefix *prefix*.
         """
-        # First the fit file.
-        if fit:
-            fitname = prefix + '-fit.dat'
+        # First the analytic file.
+        if self.make_analytic==True and analytic:
+            fitname = prefix + '-analytic.dat'
             fp = self.comm.write_on_root(fitname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
-#3. (dn/dM)*dM (differential number density of haloes, per Mpc^3 (NOT h^3/Mpc^3)
+#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
 #4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
             fp.write(line)
@@ -171,19 +170,20 @@
                 self.dn_M_z[i], self.nofmz_cum[i])
                 fp.write(line)
             fp.close()
-        if self.mode == 'haloes' and haloes:
-            haloname = prefix + '-haloes.dat'
+        if self.make_simulated==True and simulated:
+            haloname = prefix + '-simulated.dat'
             fp = self.comm.write_on_root(haloname)
             line = \
             """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
-#3. cumulative number density of haloes (per Mpc^3, NOT h^3/Mpc^3)
+#3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
             fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dis[i])
+            for i in xrange(self.masses_sim.size - 1):
+                line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
+                self.masses_sim[i]/self.hubble0,
+                self.n_cumulative_sim[i])
                 fp.write(line)
             fp.close()
 
@@ -268,13 +268,12 @@
             # All done!
 
     def dndm(self):
-        
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
         rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
                .in_units('Msun/Mpc**3')
         rho0 = rho0.value.item()
-        print rho0
+
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density
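
The cumulative counting trick in create_sim_hmf is compact enough to miss; here it is as a standalone sketch with made-up masses (pure NumPy, no yt required):

    import numpy as np

    # Made-up halo masses in Msun; the duplicate is deliberate.
    masses = np.sort(np.array([1e10, 3e10, 3e10, 7e10, 2e11]))
    sim_volume = 100.0**3   # assumed comoving Mpc**3

    # After sorting, index i has (N - i) halos at or above masses[i], so
    # counting down from N gives the cumulative count above each mass.
    n_cumulative = np.arange(len(masses), 0, -1)

    # np.unique keeps the first occurrence of a repeated mass, which is the
    # entry carrying the full cumulative count at that mass.
    unique_masses, unique_indices = np.unique(masses, return_index=True)
    n_cumulative_sim = n_cumulative[unique_indices] / sim_volume
    # unique_masses -> [1e10 3e10 7e10 2e11]; raw counts -> [5 4 2 1]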


https://bitbucket.org/yt_analysis/yt/commits/e0cce48bc6f3/
Changeset:   e0cce48bc6f3
Branch:      yt-3.0
User:        bcrosby
Date:        2014-03-25 23:23:34
Summary:     merged
Affected #:  53 files

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -1,9 +1,10 @@
-import os, shutil, string, glob
+import os, shutil, string, glob, re
 from sphinx.util.compat import Directive
 from docutils import nodes
 from docutils.parsers.rst import directives
 from IPython.nbconvert import html, python
-from runipy.notebook_runner import NotebookRunner
+from IPython.nbformat.current import read, write
+from runipy.notebook_runner import NotebookRunner, NotebookError
 
 class NotebookDirective(Directive):
     """Insert an evaluated notebook into a document
@@ -57,12 +58,8 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        try:
-            evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
-                                               skip_exceptions=skip_exceptions)
-        except:
-            # bail
-            return []
+        evaluated_text = evaluate_notebook(nb_abs_path, dest_path_eval,
+                                           skip_exceptions=skip_exceptions)
 
         # Create link to notebook and script files
         link_rst = "(" + \
@@ -138,11 +135,20 @@
     # Create evaluated version and save it to the dest path.
     # Always use --pylab so figures appear inline
     # perhaps this is questionable?
-    nb_runner = NotebookRunner(nb_path, pylab=False)
-    nb_runner.run_notebook(skip_exceptions=skip_exceptions)
+    notebook = read(open(nb_path), 'json')
+    nb_runner = NotebookRunner(notebook, pylab=False)
+    try:
+        nb_runner.run_notebook(skip_exceptions=skip_exceptions)
+    except NotebookError as e:
+        print ''
+        print e
+        # Return the traceback, filtering out ANSI color codes.
+        # http://stackoverflow.com/questions/13506033/filtering-out-ansi-escape-sequences
+        return 'Notebook conversion failed with the following traceback: \n%s' % \
+            re.sub(r'\\033[\[\]]([0-9]{1,2}([;@][0-9]{0,2})*)*[mKP]?', '', str(e))
     if dest_path is None:
         dest_path = 'temp_evaluated.ipynb'
-    nb_runner.save_notebook(dest_path)
+    write(nb_runner.nb, open(dest_path, 'w'), 'json')
     ret = nb_to_html(dest_path)
     if dest_path is 'temp_evaluated.ipynb':
         os.remove(dest_path)
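
The runipy API change this merge adapts to: the runner now takes a parsed notebook object rather than a path, and saving goes through nbformat's read/write. A minimal sketch using only the calls visible in the diff above (filenames are hypothetical):

    from IPython.nbformat.current import read, write
    from runipy.notebook_runner import NotebookRunner, NotebookError

    notebook = read(open('example.ipynb'), 'json')   # parse first
    nb_runner = NotebookRunner(notebook, pylab=False)
    try:
        nb_runner.run_notebook(skip_exceptions=False)
    except NotebookError as e:
        print 'Notebook failed: %s' % e
    write(nb_runner.nb, open('example_evaluated.ipynb', 'w'), 'json')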

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/extensions/notebookcell_sphinxext.py
--- a/doc/extensions/notebookcell_sphinxext.py
+++ b/doc/extensions/notebookcell_sphinxext.py
@@ -35,12 +35,7 @@
 
         skip_exceptions = 'skip_exceptions' in self.options
 
-        try:
-            evaluated_text = \
-                evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
-        except:
-            # bail
-            return []
+        evaluated_text = evaluate_notebook('temp.ipynb', skip_exceptions=skip_exceptions)
 
         # create notebook node
         attributes = {'format': 'html', 'source': 'nb_path'}

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/analyzing/objects.rst
--- a/doc/source/analyzing/objects.rst
+++ b/doc/source/analyzing/objects.rst
@@ -242,15 +242,15 @@
 .. notebook-cell::
 
    from yt.mods import *
-   pf = load("enzo_tiny_cosmology/DD0046/DD0046")
-   ad = pf.h.all_data()
-   total_mass = ad.quantities["TotalQuantity"]("cell_mass")
+   ds = load("enzo_tiny_cosmology/DD0046/DD0046")
+   ad = ds.all_data()
+   total_mass = ad.quantities.total_mass()
    # now select only gas with 1e5 K < T < 1e7 K.
    new_region = ad.cut_region(['obj["temperature"] > 1e5',
                                'obj["temperature"] < 1e7'])
-   cut_mass = new_region.quantities["TotalQuantity"]("cell_mass")
+   cut_mass = new_region.quantities.total_mass()
    print "The fraction of mass in this temperature range is %f." % \
-     (cut_mass[0] / total_mass[0])
+     (cut_mass / total_mass)
 
 The ``cut_region`` function generates a new object containing only the cells 
 that meet all of the specified criteria.  The sole argument to ``cut_region`` 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/bootcamp/3)_Simple_Visualization.ipynb
--- a/doc/source/bootcamp/3)_Simple_Visualization.ipynb
+++ b/doc/source/bootcamp/3)_Simple_Visualization.ipynb
@@ -243,7 +243,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "s = SlicePlot(pf, \"x\", [\"density\"], center=\"max\")\n",
+      "s = SlicePlot(ds, \"x\", [\"density\"], center=\"max\")\n",
       "s.annotate_contour(\"temperature\")\n",
       "s.zoom(2.5)"
      ],
@@ -272,4 +272,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
--- a/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
+++ b/doc/source/bootcamp/4)_Data_Objects_and_Time_Series.ipynb
@@ -68,7 +68,7 @@
       "for ds in ts:\n",
       "    dd = ds.all_data()\n",
       "    rho_ex.append(dd.quantities.extrema(\"density\"))\n",
-      "    times.append(pf.current_time.in_units(\"Gyr\"))\n",
+      "    times.append(ds.current_time.in_units(\"Gyr\"))\n",
       "rho_ex = np.array(rho_ex)"
      ],
      "language": "python",
@@ -211,7 +211,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
+      "ds = load(\"IsolatedGalaxy/galaxy0030/galaxy0030\")\n",
       "v, c = ds.find_max(\"density\")\n",
       "sl = ds.slice(0, c[0])\n",
       "print sl[\"index\", \"x\"], sl[\"index\", \"z\"], sl[\"pdx\"]\n",
@@ -361,4 +361,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/cookbook/embedded_javascript_animation.ipynb
--- a/doc/source/cookbook/embedded_javascript_animation.ipynb
+++ b/doc/source/cookbook/embedded_javascript_animation.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:578ca4fbc3831e9093489f06939abce9cde845b6cf75d901a3c429abc270f550"
+  "signature": "sha256:4f7d409d15ecc538096d15212923312e2cb4a911ebf5a9cf7edc9bd63a8335e9"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -47,7 +47,8 @@
       "import matplotlib.pyplot as plt\n",
       "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
       "\n",
-      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'mpccm'))\n",
+      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
+      "prj.set_figure_size(5)\n",
       "prj.set_zlim('density',1e-32,1e-26)\n",
       "fig = prj.plots['density'].figure\n",
       "fig.canvas = FigureCanvasAgg(fig)\n",

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/cookbook/embedded_webm_animation.ipynb
--- a/doc/source/cookbook/embedded_webm_animation.ipynb
+++ b/doc/source/cookbook/embedded_webm_animation.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:67844f8c2c184fc51aa62440cc05623ee85f252edde6faaa0d7b6617c3f33dfe"
+  "signature": "sha256:0090176ae6299b2310bf613404cbfbb42a54e19a03d1469d1429a01170a63aa0"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -96,7 +96,7 @@
       "import matplotlib.pyplot as plt\n",
       "from matplotlib.backends.backend_agg import FigureCanvasAgg\n",
       "\n",
-      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'mpccm'))\n",
+      "prj = ProjectionPlot(load('Enzo_64/DD0000/data0000'), 0, 'density', weight_field='density',width=(180,'Mpccm'))\n",
       "prj.set_zlim('density',1e-32,1e-26)\n",
       "fig = prj.plots['density'].figure\n",
       "fig.canvas = FigureCanvasAgg(fig)\n",

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -372,25 +372,6 @@
 particle type specifications.  *These are all excellent projects for new
 contributors!*
 
-Tipsy data cannot be automatically detected.  You can load it with a command
-similar to the following:
-
-.. code-block:: python
-
-    ds = TipsyDataset('test.00169',
-        parameter_file='test.param',
-        endian = '<',
-        domain_left_edge = domain_left_edge,
-        domain_right_edge = domain_right_edge,
-    )
-
-Not all of these arguments are necessary; additionally, yt accepts the
-arguments ``n_ref``, ``over_refine_factor``, ``cosmology_parameters``, and
-``unit_base``.  By default, yt will not utilize a parameter file, and by
-default it will assume the data is "big" endian (`>`).  Optionally, you may
-specify ``field_dtypes``, which describe the size of various fields.  For
-example, if you have stored positions as 64-bit floats, you can specify this
-with:
 
 .. code-block:: python
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -604,13 +604,12 @@
 
    ~yt.data_objects.derived_quantities.DerivedQuantity
    ~yt.data_objects.derived_quantities.DerivedQuantityCollection
-   ~yt.data_objects.derived_quantities.WeightedAverage
-   ~yt.data_objects.derived_quantities.TotalValue
+   ~yt.data_objects.derived_quantities.WeightedAverageQuantity
+   ~yt.data_objects.derived_quantities.TotalQuantity
    ~yt.data_objects.derived_quantities.TotalMass
    ~yt.data_objects.derived_quantities.CenterOfMass
    ~yt.data_objects.derived_quantities.BulkVelocity
    ~yt.data_objects.derived_quantities.AngularMomentumVector
-   ~yt.data_objects.derived_quantities.ParticleAngularMomentumVector
    ~yt.data_objects.derived_quantities.Extrema
    ~yt.data_objects.derived_quantities.MaxLocation
    ~yt.data_objects.derived_quantities.MinLocation
@@ -719,12 +718,11 @@
 
    ~yt.config.YTConfigParser
    ~yt.utilities.parameter_file_storage.ParameterFileStore
-   ~yt.data_objects.data_containers.FakeGridForParticles
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.index_subset.index_subset.ConstructedRootGrid
-   ~yt.analysis_modules.index_subset.index_subset.ExtractedHierarchy
+   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
+   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -9,9 +9,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
-   slc.annotate_arrow((0.53, 0.53, 0.53), 1/pf['kpc'])
+   slc.annotate_arrow((0.5, 0.5, 0.5), (1, 'kpc'))
    slc.save()
 
 -------------
@@ -30,7 +30,7 @@
 
    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
-                           8./pf.units['kpc'], 1./pf.units['kpc'])
+                           (8., 'kpc'), (1., 'kpc'))
 
    c_min = 10**na.floor(na.log10(data_source['density']).min()  )
    c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
@@ -79,7 +79,7 @@
    from yt.mods import *
    pf = load("Enzo_64/DD0043/data0043")
    s = OffAxisSlicePlot(pf, [1,1,0], ["density"], center="c")
-   s.annotate_cquiver('CuttingPlaneVelocityX', 'CuttingPlaneVelocityY', 10)
+   s.annotate_cquiver('cutting_plane_velocity_x', 'cutting_plane_velocity_y', 10)
    s.zoom(1.5)
    s.save()
 
@@ -97,7 +97,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'), center='max')
    slc.annotate_grids()
    slc.save()
@@ -153,7 +153,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_image_line((0.3, 0.4), (0.8, 0.9), plot_args={'linewidth':5})
    p.save()
@@ -169,7 +169,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_line([-6, -4, -2, 0, 2, 4, 6], [3.6, 1.6, 0.4, 0, 0.4, 1.6, 3.6], plot_args={'linewidth':5})
    p.save()
@@ -212,9 +212,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   s.annotate_marker([0.53, 0.53, 0.53], plot_args={'s':10000})
+   s.annotate_marker([0.5, 0.5, 0.5], plot_args={'s':10000})
    s.save()   
 
 -------------
@@ -237,7 +237,7 @@
    from yt.mods import *
    pf = load("Enzo_64/DD0043/data0043")
    p = ProjectionPlot(pf, "x", "density", center='m', width=(10, 'Mpc'))
-   p.annotate_particles(10/pf['Mpc'])
+   p.annotate_particles((10, 'Mpc'))
    p.save()
 
 -------------
@@ -253,9 +253,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   p.annotate_point([0.53, 0.526, 0.53], "What's going on here?", text_args={'size':'xx-large', 'color':'w'})
+   p.annotate_point([0.5, 0.496, 0.5], "What's going on here?", text_args={'size':'xx-large', 'color':'w'})
    p.save()
 
 -------------
@@ -273,8 +273,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], 
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center=[0.5, 0.5, 0.5], 
                       weight_field='density', width=(20, 'kpc'))
    p.annotate_quiver('velocity_x', 'velocity_y', 16)
    p.save()
@@ -292,9 +292,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
-   p.annotate_sphere([0.53, 0.53, 0.53], 2/pf['kpc'], {'fill':True})
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
+   p.annotate_sphere([0.5, 0.5, 0.5], (2, 'kpc'), {'fill':True})
    p.save()
 
 -------------
@@ -314,8 +314,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   s = SlicePlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   s = SlicePlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
    s.annotate_streamlines('velocity_x', 'velocity_y')
    s.save()
 
@@ -333,9 +333,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    s = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
-   s.annotate_text((0.53, 0.53), 'Sample text', text_args={'size':'xx-large', 'color':'w'})
+   s.annotate_text((0.5, 0.5), 'Sample text', text_args={'size':'xx-large', 'color':'w'})
    s.save()
 
 -------------
@@ -349,8 +349,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   p = ProjectionPlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20, 'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   p = ProjectionPlot(pf, 'z', 'density', center='c', width=(20, 'kpc'))
    p.annotate_title('Density plot')
    p.save()
 
@@ -373,7 +373,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = SlicePlot(pf, 'z', 'density', center='m', width=(10, 'kpc'))
    p.annotate_velocity()
    p.save()

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/visualizing/manual_plotting.rst
--- a/doc/source/visualizing/manual_plotting.rst
+++ b/doc/source/visualizing/manual_plotting.rst
@@ -39,13 +39,13 @@
    pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
    c = pf.h.find_max('density')[1]
-   proj = pf.proj(0, 'density')
+   proj = pf.proj('density', 0)
 
-   width = 10/pf['kpc'] # we want a 1.5 mpc view
+   width = (10, 'kpc') # we want a 1.5 mpc view
    res = [1000, 1000] # create an image with 1000x1000 pixels
    frb = proj.to_frb(width, res, center=c)
 
-   P.imshow(frb['density'])
+   P.imshow(np.array(frb['density']))
    P.savefig('my_perfect_figure.png')
    
 The FRB is a very small object that can be deleted and recreated quickly (in
@@ -76,10 +76,10 @@
    ray = pf.ortho_ray(ax, (c[1], c[2])) # cutting through the y0,z0 such that we hit the max density
 
    P.subplot(211)
-   P.semilogy(ray['x'], ray['density'])
+   P.semilogy(np.array(ray['x']), np.array(ray['density']))
    P.ylabel('density')
    P.subplot(212)
-   P.semilogy(ray['x'], ray['temperature'])
+   P.semilogy(np.array(ray['x']), np.array(ray['temperature']))
    P.xlabel('x')
    P.ylabel('temperature')
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -108,13 +108,13 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'density', center=[0.53, 0.53, 0.53], width=(20,'kpc'))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', center=[0.5, 0.5, 0.5], width=(20,'kpc'))
    slc.save()
 
 The above example will display an annotated plot of a slice of the
 Density field in a 20 kpc square window centered on the coordinate
-(0.53,0.53) in the x-y plane.  The axis to slice along is keyed to the
+(0.5, 0.5, 0.5) in the x-y plane.  The axis to slice along is keyed to the
 letter 'z', corresponding to the z-axis.  Finally, the image is saved to
 a png file.
 
@@ -124,18 +124,19 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z','Pressure', center=[0.53, 0.53, 0.53])
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'pressure', center='c')
    slc.save()
    slc.zoom(30)
    slc.save('zoom')
 
-will save a slice of the pressure field in a slice along the z
+will save a plot of the pressure field in a slice along the z
 axis across the entire simulation domain followed by another plot that
 is zoomed in by a factor of 30 with respect to the original
-image. With these sorts of manipulations, one can easily pan and zoom
-onto an interesting region in the simulation and adjust the
-boundaries of the region to visualize on the fly.
+image. Both plots will be centered on the center of the simulation box. 
+With these sorts of manipulations, one can easily pan and zoom onto an 
+interesting region in the simulation and adjust the boundaries of the
+region to visualize on the fly.
 
 A slice object can also add annotations like a title, an overlying
 quiver plot, the location of grid boundaries, halo-finder annotations,
@@ -145,12 +146,12 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.annotate_grids()
    slc.save()
 
-will plot the VorticitySquared in a 10 kiloparsec slice through the
+will plot the density field in a 10 kiloparsec slice through the
 z-axis centered on the highest density point in the simulation domain.
 Before saving the plot, the script annotates it with the grid
 boundaries, which are drawn as thick black lines by default.
@@ -174,9 +175,9 @@
 .. python-script::
  
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   prj = ProjectionPlot(pf, 2, 'density', center=[0.53, 0.53, 0.53],
-                        width=(25, 'kpc'), weight_field=None)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   prj = ProjectionPlot(pf, 2, 'density', width=(25, 'kpc'), 
+                        weight_field=None)
    prj.save()
 
 will create a projection of Density field along the x axis, plot it,
@@ -205,11 +206,11 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
-   cut = OffAxisSlicePlot(pf, L, 'density', width=(25, 'kpc'),
-                          center=[0.53, 0.53, 0.53], north_vector=north_vector)
+   cut = OffAxisSlicePlot(pf, L, 'density', width=(25, 'kpc'), 
+                          north_vector=north_vector)
    cut.save()
 
 creates an off-axis slice in the plane perpendicular to ``L``,
@@ -246,11 +247,11 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
    W = [0.02, 0.02, 0.02]
-   c = [0.53, 0.53, 0.53]
+   c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
    write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
@@ -268,11 +269,10 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    L = [1,1,0] # vector normal to cutting plane
    north_vector = [-1,1,0]
    prj = OffAxisProjectionPlot(pf,L,'density',width=(25, 'kpc'), 
-                               center=[0.53, 0.53, 0.53], 
                                north_vector=north_vector)
    prj.save()
 
@@ -292,8 +292,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.save()
 
 Panning and zooming
@@ -307,9 +307,10 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.pan((2/pf['kpc'],2/pf['kpc']))
+   from yt.units import kpc
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.pan((2*kpc, 2*kpc))
    slc.save()
 
 :class:`~yt.visualization.plot_window.SlicePlot.pan_rel` accepts deltas in units relative
@@ -318,8 +319,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.pan_rel((0.1, -0.1))
    slc.save()
 
@@ -328,8 +329,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.zoom(2)
    slc.save()
 
@@ -342,8 +343,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_axes_unit('Mpc')
    slc.save()
 
@@ -356,9 +357,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_center((0.53, 0.53))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_center((0.5, 0.5))
    slc.save()
 
 Fonts
@@ -369,8 +370,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_font({'family': 'sans-serif', 'style': 'italic','weight': 'bold', 'size': 24})
    slc.save()
 
@@ -388,9 +389,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_cmap('VorticitySquared', 'RdBu_r')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_cmap('density', 'RdBu_r')
    slc.save()
 
 The :class:`~yt.visualization.plot_window.SlicePlot.set_log` function accepts a field name
@@ -400,9 +401,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_log('VorticitySquared', False)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_log('density', False)
    slc.save()
 
 Lastly, the :class:`~yt.visualization.plot_window.SlicePlot.set_zlim` function makes it
@@ -411,9 +412,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
-   slc.set_zlim('VorticitySquared', 1e-30, 1e-25)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
+   slc.set_zlim('density', 1e-30, 1e-25)
    slc.save()
 
 Set the size of the plot
@@ -427,8 +428,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_window_size(10)
    slc.save()
 
@@ -438,8 +439,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   slc = SlicePlot(pf, 'z', 'VorticitySquared', width=(10,'kpc'), center='max')
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   slc = SlicePlot(pf, 'z', 'density', width=(10,'kpc'))
    slc.set_buff_size(1600)
    slc.save()
 
@@ -464,8 +465,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   my_galaxy = pf.disk([0.53, 0.53, 0.53], [0.0, 0.0, 1.0], 0.01, 0.003)
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   my_galaxy = pf.disk([0.5, 0.5, 0.5], [0.0, 0.0, 1.0], 0.01, 0.003)
    plot = ProfilePlot(my_galaxy, "density", ["temperature"])
    plot.save()
 
@@ -483,8 +484,8 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
-   my_sphere = pf.sphere([0.53, 0.53, 0.53], (100, "pc"))
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+   my_sphere = pf.sphere([0.5, 0.5, 0.5], (100, "kpc"))
    plot = ProfilePlot(my_sphere, "temperature", ["cell_mass"],
                       weight_field=None)
    plot.save()
@@ -589,7 +590,7 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    my_sphere = pf.sphere("c", (50, "kpc"))
    plot = PhasePlot(my_sphere, "density", "temperature", ["cell_mass"],
                     weight_field=None)
@@ -602,9 +603,9 @@
 .. python-script::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    my_sphere = pf.sphere("c", (50, "kpc"))
-   plot = PhasePlot(my_sphere, "density", "temperature", ["HI_Fraction"],
+   plot = PhasePlot(my_sphere, "density", "temperature", ["H_fraction"],
                     weight_field="cell_mass")
    plot.save()
 
@@ -646,7 +647,7 @@
 .. notebook-cell::
 
    from yt.mods import *
-   pf = load("HiresIsolatedGalaxy/DD0044/DD0044")
+   pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
    p = ProjectionPlot(pf, "x", "density", center='m', width=(10,'kpc'),
                       weight_field='density')
    p.show()

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 doc/source/visualizing/volume_rendering.rst
--- a/doc/source/visualizing/volume_rendering.rst
+++ b/doc/source/visualizing/volume_rendering.rst
@@ -142,8 +142,7 @@
 :class:`~yt.visualization.volume_rendering.camera.Camera`, which represents a
 viewpoint into a volume.  The camera optionally accepts a volume, which can be
 either an instance of
-:class:`~yt.visualization.volume_rendering.grid_partitioner.HomogenizedVolume`
-or an instance of :class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` that
+:class:`~yt.utilities.amr_kdtree.amr_kdtree.AMRKDTree` that
 has already been initialized.  If one is not supplied, the camera will generate
 one itself.  This can also be specified if you wish to save bricks between
 repeated calls, thus saving considerable amounts of time.

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -74,25 +74,84 @@
 
 __version__ = "3.0-dev"
 
-def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
-    import nose, os, sys
-    from yt.config import ytcfg
-    nose_argv = sys.argv
-    nose_argv += ['--exclude=answer_testing','--detailed-errors']
-    if verbose:
-        nose_argv.append('-v')
-    if run_answer_tests:
-        nose_argv.append('--with-answer-testing')
-    if answer_big_data:
-        nose_argv.append('--answer-big-data')
-    log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg.set("yt","suppressStreamLogging", 'True')
-    initial_dir = os.getcwd()
-    yt_file = os.path.abspath(__file__)
-    yt_dir = os.path.dirname(yt_file)
-    os.chdir(yt_dir)
-    try:
-        nose.run(argv=nose_argv)
-    finally:
-        os.chdir(initial_dir)
-        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
+# First module imports
+import numpy as np # For modern purposes
+import numpy # In case anyone wishes to use it by name
+
+from yt.funcs import \
+    iterable, \
+    get_memory_usage, \
+    print_tb, \
+    rootonly, \
+    insert_ipython, \
+    get_pbar, \
+    only_on_root, \
+    is_root, \
+    get_version_stack, \
+    get_yt_supp, \
+    get_yt_version, \
+    parallel_profile, \
+    enable_plugins, \
+    memory_checker, \
+    deprecated_class
+from yt.utilities.logger import ytLogger as mylog
+
+import yt.utilities.physical_constants as physical_constants
+import yt.units as units
+from yt.units.yt_array import YTArray, YTQuantity
+
+from yt.fields.api import \
+    field_plugins, \
+    DerivedField, \
+    FieldDetector, \
+    FieldInfoContainer, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    add_field, \
+    derived_field
+
+from yt.data_objects.api import \
+    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+    DatasetSeries, \
+    ImageArray, particle_filter, create_profile, \
+    Profile1D, Profile2D, Profile3D
+
+from yt.frontends.api import _frontend_container
+frontends = _frontend_container()
+
+from yt.frontends.stream.api import \
+    load_uniform_grid, load_amr_grids, \
+    load_particles, load_hexahedral_mesh, load_octree
+
+# For backwards compatibility
+GadgetDataset = frontends.sph.GadgetDataset
+GadgetStaticOutput = deprecated_class(GadgetDataset)
+TipsyDataset = frontends.sph.TipsyDataset
+TipsyStaticOutput = deprecated_class(TipsyDataset)
+
+# Now individual component imports from the visualization API
+from yt.visualization.api import \
+    PlotCollection, PlotCollectionInteractive, \
+    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
+    write_bitmap, write_image, \
+    apply_colormap, scale_image, write_projection, \
+    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
+    ProjectionPlot, OffAxisProjectionPlot, \
+    show_colormaps, ProfilePlot, PhasePlot
+
+from yt.visualization.volume_rendering.api import \
+    off_axis_projection
+
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_objects, enable_parallelism
+
+from yt.convenience import \
+    load, simulation
+
+# Import some helpful math utilities
+from yt.utilities.math_utils import \
+    ortho_find, quartiles, periodic_position
+

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,14 +63,13 @@
     HaloProfiler, \
     FakeProfile
 
-from .index_subset.api import \
+from .hierarchy_subset.api import \
     ConstructedRootGrid, \
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \
     ExtractedParameterFile
 
 from .level_sets.api import \
-    coalesce_join_tree, \
     identify_contours, \
     Clump, \
     find_clumps, \

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -44,8 +44,9 @@
     parallel_root_only, \
     parallel_objects
 from yt.utilities.physical_constants import \
-    mass_sun_cgs, \
-    rho_crit_now
+    mass_sun_cgs
+from yt.utilities.physical_ratios import \
+    rho_crit_g_cm3_h2
 from yt.visualization.fixed_resolution import \
     FixedResolutionBuffer
 from yt.visualization.image_writer import write_image
@@ -932,7 +933,7 @@
         if 'ActualOverdensity' in profile.keys():
             return
 
-        rhocritnow = rho_crit_now * self.pf.hubble_constant**2 # g cm^-3
+        rhocritnow = rho_crit_g_cm3_h2 * self.pf.hubble_constant**2 # g cm^-3
         rho_crit = rhocritnow * ((1.0 + self.pf.current_redshift)**3.0)
         if not self.use_critical_density: rho_crit *= self.pf.omega_matter
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ b/yt/analysis_modules/hierarchy_subset/api.py
@@ -1,5 +1,5 @@
 """
-API for index_subset
+API for hierarchy_subset
 
 
 
@@ -13,7 +13,7 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from .index_subset import \
+from .hierarchy_subset import \
     ConstructedRootGrid, \
     AMRExtractedGridProxy, \
     ExtractedHierarchy, \

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/analysis_modules/hierarchy_subset/setup.py
--- a/yt/analysis_modules/hierarchy_subset/setup.py
+++ b/yt/analysis_modules/hierarchy_subset/setup.py
@@ -7,7 +7,7 @@
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
-    config = Configuration('index_subset', parent_package, top_path)
+    config = Configuration('hierarchy_subset', parent_package, top_path)
     config.make_config_py()  # installs __config__.py
     #config.make_svn_version_py()
     return config

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -39,7 +39,7 @@
     pass
 
 vlist = "xyz"
-def setup_sunyaev_zeldovich_fields(registry, ftype, sl_info):
+def setup_sunyaev_zeldovich_fields(registry, ftype = "gas", slice_info = None):
     def _t_squared(field, data):
         return data["gas","density"]*data["gas","kT"]*data["gas","kT"]
     registry.add_field(("gas", "t_squared"),
@@ -53,7 +53,7 @@
 
     def _beta_par_squared(field, data):
         return data["gas","beta_par"]**2/data["gas","density"]
-    registry.add_field("gas","beta_par_squared",
+    registry.add_field(("gas","beta_par_squared"),
                        function = _beta_par_squared,
                        units="g/cm**3")
 
@@ -148,7 +148,9 @@
         L[axis] = 1.0
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
+        proj = self.pf.h.proj("density", axis, center=ctr, data_source=source)
+        proj.set_field_parameter("axis", axis)
         frb = proj.to_frb(width, nx)
         dens = frb["density"]
         Te = frb["t_sz"]/dens
@@ -211,7 +213,7 @@
             raise NotImplementedError
 
         beta_par = generate_beta_par(L)
-        self.pf.field_info.add_field(name=("gas","beta_par"), function=beta_par, units="g/cm**3")
+        self.pf.field_info.add_field(("gas","beta_par"), function=beta_par, units="g/cm**3")
 
         dens    = off_axis_projection(self.pf, ctr, L, w, nx, "density")
         Te      = off_axis_projection(self.pf, ctr, L, w, nx, "t_sz")/dens

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -216,7 +216,7 @@
     """
     _key_fields = YTSelectionContainer2D._key_fields + ['weight_field']
     _type_name = "proj"
-    _con_args = ('axis', 'weight_field')
+    _con_args = ('axis', 'field', 'weight_field')
     _container_fields = ('px', 'py', 'pdx', 'pdy', 'weight_field')
     def __init__(self, field, axis, weight_field = None,
                  center = None, pf = None, data_source=None, 
@@ -241,6 +241,10 @@
         return self.data_source.blocks
 
     @property
+    def field(self):
+        return [k for k in self.field_data.keys() if k not in self._container_fields]
+
+    @property
     def _mrep(self):
         return MinimalProjectionData(self)
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -802,6 +802,8 @@
             if center is None:
                 center = (self.pf.domain_right_edge
                         + self.pf.domain_left_edge)/2.0
+        elif iterable(center) and not isinstance(center, YTArray):
+            center = self.pf.arr(center, 'code_length')
         if iterable(width):
             w, u = width
             width = self.pf.arr(w, input_units = u)
@@ -1262,8 +1264,7 @@
     def _get_cut_mask(self, grid, field=None):
         if self._is_fully_enclosed(grid):
             return True # We do not want child masking here
-        if not isinstance(grid, (FakeGridForParticles,)) \
-             and grid.id in self._cut_masks:
+        if grid.id in self._cut_masks:
             return self._cut_masks[grid.id]
         # If we get this far, we have to generate the cut_mask.
         return self._get_level_mask(self.regions, grid)
@@ -1320,6 +1321,5 @@
                     this_cut_mask)
             if item == "OR":
                 np.bitwise_or(this_cut_mask, level_masks[i+1], this_cut_mask)
-        if not isinstance(grid, FakeGridForParticles):
-            self._cut_masks[grid.id] = this_cut_mask
+        self._cut_masks[grid.id] = this_cut_mask
         return this_cut_mask

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/fields/fluid_vector_fields.py
--- a/yt/fields/fluid_vector_fields.py
+++ b/yt/fields/fluid_vector_fields.py
@@ -28,8 +28,9 @@
     just_one
 
 from .vector_operations import \
-     create_magnitude_field
-    
+     create_magnitude_field, \
+     create_squared_field
+
 @register_field_plugin
 def setup_fluid_vector_fields(registry, ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.
@@ -126,6 +127,9 @@
     create_magnitude_field(registry, "vorticity", "1/s",
                            ftype=ftype, slice_info=slice_info,
                            validators=vort_validators)
+    create_squared_field(registry, "vorticity", "1/s**2",
+                         ftype=ftype, slice_info=slice_info,
+                         validators=vort_validators)
 
     def _vorticity_stretching_x(field, data):
         return data[ftype, "velocity_divergence"] * data[ftype, "vorticity_x"]

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -53,6 +53,12 @@
              / amu_cgs
     return _number_density
 
+def _create_density_func(ftype, species):
+    def _density(field, data):
+        return data[ftype, "%s_fraction" % species] \
+            * data[ftype,'density']
+    return _density
+
 def add_species_field_by_density(registry, ftype, species):
     """
     This takes a field registry, a fluid type, and a species name and then
@@ -68,3 +74,19 @@
     registry.add_field((ftype, "%s_number_density" % species),
                         function = _create_number_density_func(ftype, species),
                         units = "cm**-3")
+
+def add_species_field_by_fraction(registry, ftype, species):
+    """
+    This takes a field registry, a fluid type, and a species name and then
+    adds the other fluids based on that.  This assumes that the field
+    "SPECIES_fraction" already exists and refers to mass fraction.
+    """
+    registry.add_field((ftype, "%s_density" % species), 
+                        function = _create_density_func(ftype, species),
+                        units = "g/cm**3")
+    registry.add_field((ftype, "%s_mass" % species),
+                        function = _create_mass_func(ftype, species),
+                        units = "g")
+    registry.add_field((ftype, "%s_number_density" % species),
+                        function = _create_number_density_func(ftype, species),
+                        units = "cm**-3")
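+
+# A hypothetical usage sketch: for a frontend whose particles carry an
+# "H_fraction" mass-fraction field, a call such as
+#     add_species_field_by_fraction(registry, "gas", "H")
+# would register ("gas", "H_density"), ("gas", "H_mass") and
+# ("gas", "H_number_density") derived fields.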

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/fields/vector_operations.py
--- a/yt/fields/vector_operations.py
+++ b/yt/fields/vector_operations.py
@@ -61,6 +61,28 @@
                        function = _magnitude, units = field_units,
                        validators = validators, particle_type = particle_type)
 
+def create_squared_field(registry, basename, field_units,
+                         ftype = "gas", slice_info = None,
+                         validators = None, particle_type=False):
+
+    xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in 'xyz']
+
+    # Is this safe?
+    if registry.pf.dimensionality < 3:
+        zn = ("index", "zeros")
+    if registry.pf.dimensionality < 2:
+        yn = ("index", "zeros")
+
+    def _squared(field, data):
+        squared  = data[xn] * data[xn]
+        squared += data[yn] * data[yn]
+        squared += data[zn] * data[zn]
+        return squared
+
+    registry.add_field((ftype, "%s_squared" % basename),
+                       function = _squared, units = field_units,
+                       validators = validators, particle_type = particle_type)
+
 def create_vector_fields(registry, basename, field_units,
                          ftype = "gas", slice_info = None):
     # slice_info would be the left, the right, and the factor.

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/api.py
--- a/yt/frontends/api.py
+++ b/yt/frontends/api.py
@@ -12,3 +12,29 @@
 #
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
+
+import sys, types, os, glob, cPickle, time, importlib
+
+_frontends = [
+    'art',
+    'artio',
+    'athena',
+    'boxlib',
+    'chombo',
+    'enzo',
+    'fits',
+    'flash',
+    'gdf',
+    'halo_catalogs',
+    'moab',
+    #'pluto',
+    'ramses',
+    'sph',
+    'stream',
+]
+
+class _frontend_container:
+    def __init__(self):
+        for frontend in _frontends:
+            _mod = "yt.frontends.%s.api" % frontend
+            setattr(self, frontend, importlib.import_module(_mod))
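+
+# A hypothetical usage sketch: an instance of this container exposes each
+# frontend API module as an attribute, e.g.
+#     frontends = _frontend_container()
+#     frontends.enzo   # the yt.frontends.enzo.api module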

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -14,7 +14,6 @@
 #-----------------------------------------------------------------------------
 import numpy as np
 import os.path
-import glob
 import stat
 import weakref
 import cStringIO
@@ -203,14 +202,17 @@
         particle header, star files, etc.
         """
         base_prefix, base_suffix = filename_pattern['amr']
+        aexpstr = 'a'+file_amr.rsplit('a',1)[1].replace(base_suffix,'')
         possibles = glob.glob(os.path.dirname(file_amr)+"/*")
         for filetype, (prefix, suffix) in filename_pattern.iteritems():
             # if this attribute is already set skip it
             if getattr(self, "_file_"+filetype, None) is not None:
                 continue
-            stripped = file_amr.replace(base_prefix, prefix)
-            stripped = stripped.replace(base_suffix, suffix)
-            match, = difflib.get_close_matches(stripped, possibles, 1, 0.6)
+            match = None
+            for possible in possibles:
+                if possible.endswith(aexpstr+suffix):
+                    if os.path.basename(possible).startswith(prefix):
+                        match = possible
             if match is not None:
                 mylog.info('discovered %s:%s', filetype, match)
                 setattr(self, "_file_"+filetype, match)

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/art/tests/test_outputs.py
--- a/yt/frontends/art/tests/test_outputs.py
+++ b/yt/frontends/art/tests/test_outputs.py
@@ -1,5 +1,5 @@
 """
-ART frontend tests using SFG1 a=0.330
+ART frontend tests using D9p a=0.500
 
 
 
@@ -22,20 +22,22 @@
     data_dir_load
 from yt.frontends.art.api import ARTDataset
 
-_fields = ("Density", "particle_mass", ("all", "particle_position_x"))
+_fields = ("Temperature", "Density", "particle_mass", ("all", "particle_position_x"))
 
-sfg1 = "10MpcBox_csf512_a0.330.d"
+d9p = "D9p_500/10MpcBox_HartGal_csf_a0.500.d"
 
-
-@requires_pf(sfg1, big_data=True)
-def test_sfg1():
-    pf = data_dir_load(sfg1)
-    yield assert_equal, str(pf), "10MpcBox_csf512_a0.330.d"
+@requires_pf(d9p, big_data=True)
+def test_d9p():
+    pf = data_dir_load(d9p)
+    yield assert_equal, str(pf), "10MpcBox_HartGal_csf_a0.500.d"
+    for test in big_patch_amr(d9p, _fields):
+        test_d9p.__name__ = test.description
+        yield test
     dso = [None, ("sphere", ("max", (0.1, 'unitary')))]
     for field in _fields:
         for axis in [0, 1, 2]:
             for ds in dso:
                 for weight_field in [None, "Density"]:
                     yield PixelizedProjectionValuesTest(
-                        sfg1, axis, field, weight_field,
+                        d9p, axis, field, weight_field,
                         ds)

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -177,7 +177,7 @@
             if self.dimensionality < 3:
                 dx[i].append(DRE[2] - DLE[1])
         self.level_dds = np.array(dx, dtype="float64")
-        coordinate_type = int(header_file.next())
+        header_file.next()
         if self.pf.geometry == "cartesian":
             default_ybounds = (0.0, 1.0)
             default_zbounds = (0.0, 1.0)
@@ -580,7 +580,11 @@
         header_file.readline()
         self._header_mesh_start = header_file.tell()
         header_file.next()
-        coordinate_type = int(header_file.next())
+        next_line = header_file.next()
+        if len(next_line.split()) == 1:
+            coordinate_type = int(next_line)
+        else:
+            coordinate_type = 0
         if coordinate_type == 0:
             self.geometry = "cartesian"
         elif coordinate_type == 1:

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -19,6 +19,7 @@
 import stat
 import numpy as np
 import weakref
+import warnings
 
 from yt.config import ytcfg
 from yt.funcs import *
@@ -250,7 +251,9 @@
         except:
             pass
         try:
-            fileh = pyfits.open(args[0])
+            with warnings.catch_warnings():
+                warnings.filterwarnings('ignore', category=UserWarning, append=True)
+                fileh = pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -38,7 +38,7 @@
     mass_sun_cgs
 from yt.utilities.cosmology import Cosmology
 from .fields import \
-    SPHFieldInfo
+    SPHFieldInfo, OWLSFieldInfo, TipsyFieldInfo
 from .definitions import \
     gadget_header_specs, \
     gadget_field_specs, \
@@ -284,6 +284,7 @@
 
 class OWLSDataset(GadgetHDF5Dataset):
     _particle_mass_name = "Mass"
+    _field_info_class = OWLSFieldInfo
 
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")
@@ -355,7 +356,7 @@
 class TipsyDataset(ParticleDataset):
     _index_class = ParticleIndex
     _file_class = TipsyFile
-    _field_info_class = SPHFieldInfo
+    _field_info_class = TipsyFieldInfo
     _particle_mass_name = "Mass"
     _particle_coordinates_name = "Coordinates"
     _header_spec = (('time',    'd'),
@@ -367,7 +368,6 @@
                     ('dummy',   'i'))
 
     def __init__(self, filename, dataset_type="tipsy",
-                 endian=">",
                  field_dtypes=None,
                  domain_left_edge=None,
                  domain_right_edge=None,
@@ -377,7 +377,7 @@
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        self.endian = endian
+        self.endian = self._validate_header(filename)[1]
         self.storage_filename = None
         if domain_left_edge is None:
             domain_left_edge = np.zeros(3, "float64") - 0.5
@@ -514,10 +514,35 @@
             density_unit = self.mass_unit / self.length_unit**3
         self.time_unit = 1.0 / np.sqrt(G * density_unit)
 
+    @staticmethod
+    def _validate_header(filename):
+        try:
+            f = open(filename,'rb')
+        except:
+            return False, 1
+        fs = len(f.read())
+        f.seek(0)
+        #Read in the header
+        t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
+        endianswap = "<"
+        #Check Endianness
+        if (ndim < 1 or ndim > 3):
+            endianswap = ">"
+            f.seek(0)
+            t, n, ndim, ng, nd, ns = struct.unpack(">diiiii", f.read(28))
+        #Catch for 4 byte padding
+        if (fs == 32+48*ng+36*nd+44*ns):
+            f.read(4)
+        #File is borked if this is true
+        elif (fs != 28+48*ng+36*nd+44*ns):
+            f.close()
+            return False, 0
+        f.close()
+        return True, endianswap
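+
+    # A hypothetical sketch of exercising the validator directly:
+    #     valid, endian = TipsyDataset._validate_header("snapshot.00100")
+    # where valid is a bool and, for a readable file, endian is "<" or ">".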
+
     @classmethod
     def _is_valid(self, *args, **kwargs):
-        # We do not allow load() of these files.
-        return False
+        return TipsyDataset._validate_header(args[0])[0]
 
 class HTTPParticleFile(ParticleFile):
     pass

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -23,6 +23,10 @@
     gadget_ptypes, \
     ghdf5_ptypes
 
+from yt.fields.species_fields import add_species_field_by_fraction
+
+
+
 # Here are helper functions for things like vector fields and so on.
 
 def _get_conv(cf):
@@ -50,3 +54,70 @@
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
     )
+
+
+
+class TipsyFieldInfo(SPHFieldInfo):
+
+    def __init__(self, pf, field_list, slice_info = None):
+        aux_particle_fields = {
+                'uDotFB':("uDotFB", ("code_mass * code_velocity**2", ["uDotFB"], None)),
+                'uDotAV':("uDotAV", ("code_mass * code_velocity**2", ["uDotAV"], None)),
+                'uDotPdV':("uDotPdV", ("code_mass * code_velocity**2", ["uDotPdV"], None)),
+                'uDotHydro':("uDotHydro", ("code_mass * code_velocity**2", ["uDotHydro"], None)),
+                'uDotDiff':("uDotDiff", ("code_mass * code_velocity**2", ["uDotDiff"], None)),
+                'uDot':("uDot", ("code_mass * code_velocity**2", ["uDot"], None)),
+                'coolontime':("coolontime", ("code_time", ["coolontime"], None)),
+                'timeform':("timeform", ("code_time", ["timeform"], None)),
+                'massform':("massform", ("code_mass", ["massform"], None)),
+                'HI':("HI", ("dimensionless", ["HI"], None)),
+                'HII':("HII", ("dimensionless", ["HII"], None)),
+                'HeI':("HeI", ("dimensionless", ["HeI"], None)),
+                'HeII':("HeII", ("dimensionless", ["HeII"], None)),
+                'OxMassFrac':("OxMassFrac", ("dimensionless", ["OxMassFrac"], None)),
+                'FeMassFrac':("FeMassFrac", ("dimensionless", ["FeMassFrac"], None)),
+                'c':("c", ("code_velocity", ["c"], None)),
+                'acc':("acc", ("code_velocity / code_time", ["acc"], None)),
+                'accg':("accg", ("code_velocity / code_time", ["accg"], None))}
+        for field in field_list:
+            if field[1] in aux_particle_fields.keys() and \
+                aux_particle_fields[field[1]] not in self.known_particle_fields:
+                self.known_particle_fields += (aux_particle_fields[field[1]],)
+        super(TipsyFieldInfo,self).__init__(pf, field_list, slice_info)
+
+
+        
+
+class OWLSFieldInfo(SPHFieldInfo):
+
+    _species_fractions = ['H_fraction', 'He_fraction', 'C_fraction',
+                          'N_fraction', 'O_fraction', 'Ne_fraction',
+                          'Mg_fraction', 'Si_fraction', 'Fe_fraction']
+
+    # override
+    #--------------------------------------------------------------
+    def __init__(self, *args, **kwargs):
+        
+        new_particle_fields = (
+            ('Hydrogen', ('', ['H_fraction'], None)),
+            ('Helium', ('', ['He_fraction'], None)),
+            ('Carbon', ('', ['C_fraction'], None)),
+            ('Nitrogen', ('', ['N_fraction'], None)),
+            ('Oxygen', ('', ['O_fraction'], None)),
+            ('Neon', ('', ['Ne_fraction'], None)),
+            ('Magnesium', ('', ['Mg_fraction'], None)),
+            ('Silicon', ('', ['Si_fraction'], None)),
+            ('Iron', ('', ['Fe_fraction'], None))
+            )
+
+        self.known_particle_fields += new_particle_fields
+        
+        super(OWLSFieldInfo,self).__init__( *args, **kwargs )
+
+
+        
+    def setup_fluid_fields(self):
+        # here species_name is "H", "He", etc
+        for s in self._species_fractions:
+            species_name = s.split('_')[0]
+            add_species_field_by_fraction(self, "gas", species_name)

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,6 +53,9 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
     _known_ptypes = ghdf5_ptypes
     _var_mass = None
+    _element_fields = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
+                       'Neon', 'Magnesium', 'Silicon', 'Iron' )
+
 
     @property
     def var_mass(self):
@@ -100,13 +103,20 @@
                 del coords
                 if mask is None: continue
                 for field in field_list:
+                    
                     if field in ("Mass", "Masses") and \
                         ptype not in self.var_mass:
                         data = np.empty(mask.sum(), dtype="float64")
                         ind = self._known_ptypes.index(ptype) 
                         data[:] = self.pf["Massarr"][ind]
+
+                    elif field in self._element_fields:
+                        rfield = 'ElementAbundance/' + field
+                        data = g[rfield][:][mask,...]
+
                     else:
                         data = g[field][:][mask,...]
+
                     yield (ptype, field), data
             f.close()
 
@@ -144,24 +154,46 @@
         npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) 
         return npart
 
+
     def _identify_fields(self, data_file):
         f = _get_h5_handle(data_file.filename)
         fields = []
-        cname = self.pf._particle_coordinates_name
-        mname = self.pf._particle_mass_name
-        for key in f.keys():
+        cname = self.pf._particle_coordinates_name  # Coordinates
+        mname = self.pf._particle_mass_name  # Mass
+
+        # loop over all keys in OWLS hdf5 file
+        #--------------------------------------------------
+        for key in f.keys():   
+
+            # only want particle data
+            #--------------------------------------
             if not key.startswith("PartType"): continue
+
+            # particle data group
+            #--------------------------------------
             g = f[key]
             if cname not in g: continue
+
+            # note str => not unicode!
+
             #ptype = int(key[8:])
             ptype = str(key)
+
+            # loop over all keys in PartTypeX group
+            #----------------------------------------
             for k in g.keys():
-                if not hasattr(g[k], "shape"): continue
-                # str => not unicode!
-                fields.append((ptype, str(k)))
-            if mname not in g.keys():
-                # We'll append it anyway.
-                fields.append((ptype, mname))
+
+                if k == 'ElementAbundance':
+                    gp = g[k]
+                    for j in gp.keys():
+                        kk = j
+                        fields.append((ptype, str(kk)))
+                else:
+                    kk = k
+                    if not hasattr(g[kk], "shape"): continue
+                    fields.append((ptype, str(kk)))
+
+
         f.close()
         return fields, {}
 
@@ -420,12 +452,12 @@
                     raise RuntimeError
             
         # Use the mask to slice out the appropriate particle type data
-        if mask.size == data_file.total_particles['DarkMatter']:
-            return auxdata[:data_file.total_particles['DarkMatter']]
-        elif mask.size == data_file.total_particles['Gas']:
-            return auxdata[data_file.total_particles['DarkMatter']:data_file.total_particles['Stars']]
+        if mask.size == data_file.total_particles['Gas']:
+            return auxdata[:data_file.total_particles['Gas']]
+        elif mask.size == data_file.total_particles['DarkMatter']:
+            return auxdata[data_file.total_particles['Gas']:-data_file.total_particles['Stars']]
         else:
-            return auxdata[data_file.total_particles['Stars']:]
+            return auxdata[-data_file.total_particles['Stars']:]
 
     def _fill_fields(self, fields, vals, mask, data_file):
         if mask is None:

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -727,3 +727,14 @@
         return cls(*args, **kwargs)
     return _func
     
+def enable_plugins():
+    from yt.config import ytcfg
+    my_plugin_name = ytcfg.get("yt","pluginfilename")
+    # We assume that it is with respect to the $HOME/.yt directory
+    if os.path.isfile(my_plugin_name):
+        _fn = my_plugin_name
+    else:
+        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
+    if os.path.isfile(_fn):
+        mylog.info("Loading plugins from %s", _fn)
+        execfile(_fn)
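+
+# A hypothetical sketch of a plugin file (e.g. ~/.yt/my_plugins.py); the file
+# is run with execfile(), so it can import yt and register derived fields:
+#
+#     from yt import add_field
+#     def _dust_proxy(field, data):
+#         return 0.01 * data["gas", "density"]
+#     add_field(("gas", "dust_proxy"), function=_dust_proxy, units="g/cm**3")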

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/geometry/cartesian_coordinates.py
--- a/yt/geometry/cartesian_coordinates.py
+++ b/yt/geometry/cartesian_coordinates.py
@@ -27,7 +27,7 @@
 
     def setup_fields(self, registry):
         for axi, ax in enumerate('xyz'):
-            f1, f2 = _get_coord_fields(axi, ax)
+            f1, f2 = _get_coord_fields(axi)
             registry.add_field(("index", "d%s" % ax), function = f1,
                                display_field = False,
                                units = "code_length")

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -492,7 +492,7 @@
             coords[:,i] += self.DLE[i]
         return coords
 
-    def save_octree(self):
+    def save_octree(self, always_descend = False):
         # Get the header
         header = dict(dims = (self.nn[0], self.nn[1], self.nn[2]),
                       left_edge = (self.DLE[0], self.DLE[1], self.DLE[2]),
@@ -507,9 +507,15 @@
         data.nz = 8
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
         ref_mask = np.zeros(self.nocts * 8, dtype="uint8") - 1
-        data.array = <void *> ref_mask.data
+        cdef void *p[2]
+        cdef np.uint8_t ad = int(always_descend)
+        p[0] = <void *> &ad
+        p[1] = ref_mask.data
+        data.array = p
         # Enforce partial_coverage here
         self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
+        if always_descend:
+            ref_mask = ref_mask[:data.index-1]
         header['octree'] = ref_mask
         return header
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -176,9 +176,14 @@
     arr[o.domain - 1] += 1
 
 cdef void store_octree(Oct *o, OctVisitorData *data, np.uint8_t selected):
-    cdef np.uint8_t *arr, res, ii
+    cdef np.uint8_t *arr, res, ii, *always_descend
     ii = cind(data.ind[0], data.ind[1], data.ind[2])
-    arr = <np.uint8_t *> data.array
+    cdef void **p = <void **> data.array
+    always_descend = <np.uint8_t *> p[0]
+    arr = <np.uint8_t *> p[1]
+    if always_descend[0] == 1 and data.last == o.domain_ind:
+        return
+    data.last = o.domain_ind
     if o.children == NULL or o.children[ii] == NULL:
         res = 0
     else:

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -20,11 +20,8 @@
 # ALL IMPORTS GO HERE
 #
 
-# First module imports
-import sys, types, os, glob, cPickle, time
-import numpy as na # For historical reasons
-import numpy as np # For modern purposes
-import numpy # In case anyone wishes to use it by name
+import os
+from yt import *
 
 # This next item will handle most of the actual startup procedures, but it will
 # also attempt to parse the command line and set up the global state of various
@@ -35,13 +32,7 @@
 import yt.startup_tasks as __startup_tasks
 unparsed_args = __startup_tasks.unparsed_args
 
-from yt.funcs import *
-from yt.utilities.logger import ytLogger as mylog
-from yt.utilities.performance_counters import yt_counters, time_function
 from yt.config import ytcfg, ytcfg_defaults
-import yt.utilities.physical_constants as physical_constants
-import yt.units as units
-from yt.units.yt_array import YTArray, YTQuantity
 
 from yt.utilities.logger import level as __level
 if __level >= int(ytcfg_defaults["loglevel"]):
@@ -49,134 +40,6 @@
     mylog.debug("Turning off NumPy error reporting")
     np.seterr(all = 'ignore')
 
-from yt.fields.api import \
-    field_plugins, \
-    DerivedField, \
-    FieldDetector, \
-    FieldInfoContainer, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType, \
-    add_field, \
-    derived_field
-
-from yt.data_objects.api import \
-    BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
-    data_object_registry, \
-    DatasetSeries, AnalysisTask, analysis_task, \
-    ImageArray, particle_filter, create_profile, \
-    Profile1D, Profile2D, Profile3D
-
-from yt.frontends.enzo.api import \
-    EnzoDataset, EnzoDatasetInMemory, \
-    EnzoSimulation, EnzoFieldInfo, add_enzo_field
-
-# Boxlib stuff
-from yt.frontends.boxlib.api import \
-    BoxlibDataset
-
-# Orion stuff
-#from yt.frontends.boxlib.api import \
-#    OrionDataset, OrionFieldInfo, add_orion_field
-
-# Maestro stuff
-#from yt.frontends.boxlib.api import \
-#    MaestroDataset
-
-# Castro stuff
-#from yt.frontends.boxlib.api import \
-#    CastroDataset
-
-from yt.frontends.flash.api import \
-    FLASHDataset, FLASHFieldInfo
-
-from yt.frontends.artio.api import \
-    ARTIODataset, ARTIOFieldInfo
-
-from yt.frontends.ramses.api import \
-    RAMSESDataset, RAMSESFieldInfo
-
-from yt.frontends.halo_catalogs.api import \
-    HaloCatalogDataset, HaloCatalogFieldInfo, \
-    RockstarDataset, RockstarFieldInfo
-
-from yt.frontends.chombo.api import \
-    ChomboDataset, ChomboFieldInfo, add_chombo_field
-
-from yt.frontends.gdf.api import \
-    GDFDataset, GDFFieldInfo, add_gdf_field
-
-from yt.frontends.moab.api import \
-    MoabHex8Dataset, MoabFieldInfo, \
-    PyneMoabHex8Dataset, PyneFieldInfo
-
-from yt.frontends.athena.api import \
-    AthenaDataset, AthenaFieldInfo
-
-from yt.frontends.art.api import \
-    ARTDataset, ARTFieldInfo
-
-#from yt.frontends.pluto.api import \
-#     PlutoDataset, PlutoFieldInfo, add_pluto_field
-
-from yt.frontends.stream.api import \
-    StreamDataset, \
-    StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles, load_hexahedral_mesh, load_octree
-
-from yt.frontends.sph.api import \
-    OWLSDataset, SPHFieldInfo, \
-    GadgetDataset, GadgetHDF5Dataset, \
-    TipsyDataset
-
-# For backwards compatibility
-GadgetStaticOutput = deprecated_class(GadgetDataset)
-TipsyStaticOutput = deprecated_class(TipsyDataset)
-
-#from yt.analysis_modules.list_modules import \
-#    get_available_modules, amods
-#available_analysis_modules = get_available_modules()
-
-from yt.frontends.fits.api import \
-    FITSDataset, FITSFieldInfo
-
-# Import our analysis modules
-from yt.analysis_modules.halo_finding.api import \
-    HaloFinder
-
-from yt.utilities.definitions import \
-    axis_names, x_dict, y_dict, inv_axis_names
-
-# Now individual component imports from the visualization API
-from yt.visualization.api import \
-    PlotCollection, PlotCollectionInteractive, \
-    get_multi_plot, FixedResolutionBuffer, ObliqueFixedResolutionBuffer, \
-    callback_registry, write_bitmap, write_image, \
-    apply_colormap, scale_image, write_projection, \
-    SlicePlot, AxisAlignedSlicePlot, OffAxisSlicePlot, \
-    ProjectionPlot, OffAxisProjectionPlot, \
-    show_colormaps, ProfilePlot, PhasePlot
-
-from yt.visualization.volume_rendering.api import \
-    ColorTransferFunction, PlanckTransferFunction, ProjectionTransferFunction, \
-    Camera, off_axis_projection, MosaicFisheyeCamera
-
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    parallel_objects
-
-for name, cls in callback_registry.items():
-    exec("%s = cls" % name)
-
-from yt.convenience import \
-    load, projload, simulation
-
-# Import some helpful math utilities
-from yt.utilities.math_utils import \
-    ortho_find, quartiles, periodic_position
-
-
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions
 # that get used every time that they don't have to *define* every time.
@@ -184,12 +47,4 @@
 # Unfortunately, for now, I think the easiest and simplest way of doing
 # this is also the most dangerous way.
 if ytcfg.getboolean("yt","loadfieldplugins"):
-    my_plugin_name = ytcfg.get("yt","pluginfilename")
-    # We assume that it is with respect to the $HOME/.yt directory
-    if os.path.isfile(my_plugin_name):
-        _fn = my_plugin_name
-    else:
-        _fn = os.path.expanduser("~/.yt/%s" % my_plugin_name)
-    if os.path.isfile(_fn):
-        mylog.info("Loading plugins from %s", _fn)
-        execfile(_fn)
+    enable_plugins()

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -24,29 +24,18 @@
 exe_name = os.path.basename(sys.executable)
 # At import time, we determined whether or not we're being run in parallel.
 def turn_on_parallelism():
+    parallel_capable = False
     try:
         from mpi4py import MPI
     except ImportError as e:
         mylog.error("Warning: Attempting to turn on parallelism, " +
                     "but mpi4py import failed. Try pip install mpi4py.")
         raise e
-    parallel_capable = (MPI.COMM_WORLD.size > 1)
-    if parallel_capable:
-        mylog.info("Global parallel computation enabled: %s / %s",
-                   MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
-        ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
-        ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
-        ytcfg["yt","__parallel"] = "True"
-        if exe_name == "embed_enzo" or \
-            ("_parallel" in dir(sys) and sys._parallel == True):
-            ytcfg["yt","inline"] = "True"
-        # I believe we do not need to turn this off manually
-        #ytcfg["yt","StoreParameterFiles"] = "False"
-        # Now let's make sure we have the right options set.
-        if MPI.COMM_WORLD.rank > 0:
-            if ytcfg.getboolean("yt","LogFile"):
-                ytcfg["yt","LogFile"] = "False"
-                yt.utilities.logger.disable_file_logging()
+    # Now we have to turn on the parallelism from the perspective of the
+    # parallel_analysis_interface
+    from yt.utilities.parallel_tools.parallel_analysis_interface import \
+        enable_parallelism
+    parallel_capable = enable_parallelism()
     return parallel_capable
 
 # This fallback is for Paraview:

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -624,3 +624,25 @@
         return _func
     return compare_results(func)
 
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+    import nose, os, sys, yt
+    from yt.funcs import mylog
+    orig_level = mylog.getEffectiveLevel()
+    mylog.setLevel(50)
+    nose_argv = sys.argv
+    nose_argv += ['--exclude=answer_testing','--detailed-errors']
+    if verbose:
+        nose_argv.append('-v')
+    if run_answer_tests:
+        nose_argv.append('--with-answer-testing')
+    if answer_big_data:
+        nose_argv.append('--answer-big-data')
+    initial_dir = os.getcwd()
+    yt_file = os.path.abspath(yt.__file__)
+    yt_dir = os.path.dirname(yt_file)
+    os.chdir(yt_dir)
+    try:
+        nose.run(argv=nose_argv)
+    finally:
+        os.chdir(initial_dir)
+        mylog.setLevel(orig_level)

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/utilities/definitions.py
--- a/yt/utilities/definitions.py
+++ b/yt/utilities/definitions.py
@@ -47,8 +47,7 @@
                   'cm'    : cm_per_mpc}
 
 # Nicely formatted versions of common length units
-formatted_length_unit_names = {'mpc'     : 'Mpc',
-                               'au'      : 'AU',
+formatted_length_unit_names = {'au'      : 'AU',
                                'rsun'    : 'R_\odot',
                                'code_length': 'code\/length'}
 

diff -r e93bc0139d69b7ef46c7e79ee667d3e3272aa070 -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -2,8 +2,6 @@
 import setuptools
 import os, sys, os.path, glob, \
     tempfile, subprocess, shutil
-from yt.utilities.setup import \
-    check_for_dependencies
 
 def check_for_openmp():
     # Create a temporary directory

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/52835cd5c855/
Changeset:   52835cd5c855
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-01 21:11:41
Summary:     Adding a bit more flexibility to the halo mass function and cleaning up the state of the doc string.
Affected #:  2 files

diff -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -6,7 +6,7 @@
 .. versionadded:: 1.6
 
 The Halo Mass Function extension is capable of outputting the halo mass function
-for a collection haloes (input), and/or an analytical fit over a given mass range
+for a collection of halos (input), and/or an analytical fit over a given mass range
 for a set of specified cosmological parameters.
 
 This extension is based on code generously provided by Brian O'Shea.
@@ -14,7 +14,7 @@
 General Overview
 ----------------
 
-In order to run this extension on a dataset, the haloes need to be located
+In order to run this extension on a dataset, the halos need to be located
 (using HOP, FOF or Parallel HOP, see :ref:`halo_finding`),
 and their virial masses determined using the
 HaloProfiler (see :ref:`halo_profiling`).
@@ -24,7 +24,32 @@
 cosmological parameters will need to be input as well. These initial parameters
 are not stored in an Enzo dataset, so they must be set by hand.
 An analytical fit can be found without referencing a particular dataset or
-set of haloes, but all the cosmological parameters need to be set by hand.
+set of halos, but all the cosmological parameters need to be set by hand.
+
+Basic Halo Mass Function Creation
+---------------------------------
+
+Creating the halo mass function of simulated halos requires only loading a halo
+dataset and passing it to HaloMassFcn().
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+  halos_ds = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=halos_ds)
+
+If an analytic fit is also desired, additionally set ``make_analytic=True``,
+changing the last line above to the following:
+
+.. code-block:: python
+
+  hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
+
+``hmf`` is a HaloMassFcn object with arrays holding the simulated halo masses
+and the cumulative halo number density hanging off of it.
+
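+As a quick check, the simulated mass function can be plotted straight from
+these arrays; a minimal sketch, assuming matplotlib is available:
+
+.. code-block:: python
+
+  import matplotlib.pyplot as plt
+  # masses_sim is in Msun; n_cumulative_sim is the cumulative number density
+  # of halos above that mass, per comoving Mpc^3
+  plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+  plt.savefig("hmf_simulated.png")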
 
 Analytical Fits
 ---------------
@@ -45,38 +70,41 @@
 The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which
 appears to fit HOP threshold=80.0 fairly well.
 
-Analyze Simulated Haloes
+Analyze Simulated Halos
 ------------------------
 
-If an analytical fit is not needed, it is simple to analyze a set of 
-haloes. The ``halo_file`` needs to be specified, and
-``fitting_function`` does not need to be specified.
-``num_sigma_bins`` is how many bins the halo masses are sorted into.
-The default is 360. ``mass_column`` is the zero-indexed column of the
-``halo_file`` file that contains the halo masses. The default is 5, which
-corresponds to the sixth column of data in the file.
+To create the halo mass function of halos found in a simulation, only the 
+loaded halo dataset needs to be specified.
 
 .. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", num_sigma_bins=200,
-  mass_column=5)
+  halos_ds = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=halos_ds)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves
-the halo mass function to a text file. By default, both the halo analysis (``haloes``) and
-fit (``fit``) are written to (different) text files, but they can be turned on or off
-explicitly. ``prefix`` sets the name used for the file(s). The haloes file
-is named ``prefix-haloes.dat``, and the fit file ``prefix-fit.dat``.
+This will calculate the cumulative halo mass function for the halo dataset and
+create ``hmf``, a HaloMassFcn object with the arrays ``masses_sim`` and
+``n_cumulative_sim`` hanging off of it. These arrays hold the halo masses in
+units of solar mass and the cumulative number density of halos above that mass
+per comoving Mpc^3, respectively.
+
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
+halo mass function to a text file.
+
+By default, both the mass function of the simulated halos (``simulated``) and
+the analytic fit (``analytic``) are written to text files, but they can be
+turned on or off explicitly. ``prefix`` sets the name used for the file(s).
+The halos file is named ``prefix-halos.dat``, and the fit file
+``prefix-fit.dat``.
 Continued from above, invoking this command:
 
 .. code-block:: python
 
-  hmf.write_out(prefix='hmf', fit=False, haloes=True)
+  hmf.write_out(prefix='hmf', fit=False, halos=True)
 
-will save the haloes data to a file named ``hmf-haloes.dat``. The contents
-of the ``-haloes.dat`` file is three columns:
+will save the halos data to a file named ``hmf-halos.dat``. The
+``-halos.dat`` file contains three columns:
 
   1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
   2. mass (Msolar/h) for this bin.
@@ -85,9 +113,43 @@
 Analytical Halo Mass Function Fit
 ---------------------------------
 
+To create an analytic mass function, several additional parameters that are
+not necessarily stored in the dataset need to be provided. If a halo or
+simulation dataset is provided, any values that can be extracted directly
+from it will be used. The following parameters will need to be set:
+
+``make_analytic=True``
+
+:math:`\Omega_{m}`, ``omega_matter0``, Default=0.2726
+
+:math:`\Omega_{\Lambda}`, ``omega_lambda0``, Default=0.7274
+
+:math:`\Omega_{b}`, ``omega_baryon0``, Default=0.0456
+
+:math:`h`, ``hubble0``, Default=0.704
+
+:math:`\sigma_8`, ``sigma8input``, Default=0.86
+
+primordial index, ``primordial_index``, Default=1.0
+
+redshift, ``this_redshift``, Default=0
+
+log of the minimum halo mass, :math:`\log_{10}M_{min}`, ``log_mass_min``, Default=None
+
+log of the maximum halo mass, :math:`\log_{10}M_{max}`, ``log_mass_max``, Default=None
+
+Providing a simulation or halo dataset will generally set ``omega_matter0``,
+``omega_lambda0``, ``hubble0``, and ``this_redshift``. If ``log_mass_min`` or
+``log_mass_max`` are not specified but a halo dataset has been provided, the
+range of halo masses will be used to set these parameters.
+
 When an analytical fit is desired, in nearly all cases several cosmological
 parameters will need to be specified by hand. These parameters are not
-stored with Enzo datasets. In the case where both the haloes and an analytical
+stored with Enzo datasets. In the case where both the halos and an analytical
 fit are desired, the analysis is instantiated as below.
 ``sigma8input``, ``primordial_index`` and ``omega_baryon0`` should be set to
 the same values as
@@ -108,7 +170,7 @@
   fitting_function=4)
   hmf.write_out(prefix='hmf')
 
-Both the ``-haloes.dat`` and ``-fit.dat`` files are written to disk.
+Both the ``-halos.dat`` and ``-fit.dat`` files are written to disk.
 The contents of the ``-fit.dat`` file is four columns:
 
   1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
@@ -116,12 +178,12 @@
   3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3) in this bin.
   4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
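+
+As a sketch (not part of the module itself), the fit file produced above can
+be read back with NumPy, assuming the four-column layout just described:
+
+.. code-block:: python
+
+  import numpy as np
+  log10_mass, mass_h, dn_dm, n_cum = np.loadtxt("hmf-fit.dat", unpack=True)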
 
-Below is an example of the output for both the haloes and the (Warren)
+Below is an example of the output for both the halos and the (Warren)
 analytical fit, for three datasets. The black lines are the calculated
 halo mass functions, and the blue lines the analytical fit set by initial
 conditions. This simulation shows typical behavior, in that there are too
-few small haloes compared to the fit due to lack of mass and gravity resolution
-for small haloes. But at higher mass ranges, the simulated haloes are quite close
+few small halos compared to the fit due to lack of mass and gravity resolution
+for small halos. But at higher mass ranges, the simulated halos are quite close
 to the analytical fit.
 
 .. image:: _images/halo_mass_function.png
@@ -145,7 +207,7 @@
   omega_baryon0=0.06, hubble0=.7, this_redshift=0., log_mass_min=8.,
   log_mass_max=13., sigma8input=0.9, primordial_index=1.,
   fitting_function=1)
-  hmf.write_out(prefix="hmf-press-schechter", fit=True, haloes=False)
+  hmf.write_out(prefix="hmf-press-schechter", fit=True, halos=False)
 
 It is possible to access the output of the halo mass function without saving
 to disk. The content is stored in arrays hanging off the ``HaloMassFcn``

diff -r e0cce48bc6f3f504cf36b934a3d1b64f049d6ac9 -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -23,59 +23,140 @@
     parallel_blocking_call
 from yt.utilities.physical_ratios import \
     rho_crit_g_cm3_h2
+from yt.utilities.logger import ytLogger as mylog
 
 class HaloMassFcn(ParallelAnalysisInterface):
+    r"""
+    Initialize a HaloMassFcn object to analyze the distribution of halos as
+    a function of mass.  A mass function can be created for a set of
+    simulated halos, an analytic fit can be created for a redshift and a
+    set of cosmological parameters, or both can be created.
+
+    Provided with a halo dataset object, this will make the mass function
+    for simulated halos.  Providing a simulation dataset will set as many
+    of the cosmological parameters as possible for the creation of the
+    analytic mass function.
+
+    The HaloMassFcn object has arrays hanging off of it containing the mass
+    function information.
+
+    masses_sim : Array 
+        Halo masses from simulated halos.
+    n_cumulative_sim : Array
+        Number density of halos with mass greater than the corresponding 
+        mass in masses_sim (simulated).
+    massarray : Array
+        Masses used for the generation of the analytic mass function.
+    nofmz_cum : Array
+        Number density of halos with mass greater than the corresponding
+        mass in massarray (analytic).
+
+    The HaloMassFcn object also has a convenience function write_out() that
+    will write out the data to disk.
+
+    Creating a HaloMassFcn object with no arguments will produce an analytic
+    mass function at redshift = 0 using default cosmological values.
+
+    Parameters
+    ----------
+    simulation_ds : Simulation dataset object
+        The loaded simulation dataset, used to set cosmological parameters.
+        Default : None.
+    halos_ds : Halo dataset object
+        The halos from a simulation to be used for creation of the 
+        halo mass function in the simulation.
+        Default : None.
+    make_analytic : bool 
+        Whether or not to calculate the analytic mass function to go with 
+        the simulated halo mass function.  Automatically set to true if a 
+        simulation dataset is provided.
+        Default : False.
+    omega_matter0 : float
+        The fraction of the universe made up of matter (dark and baryonic). 
+        Default : 0.2726.
+    omega_lambda0 : float
+        The fraction of the universe made up of dark energy. 
+        Default : 0.7274.
+    omega_baryon0  : float 
+        The fraction of the universe made up of baryonic matter. This is not 
+        always stored in the dataset and should be checked by hand.
+        Default : 0.0456.
+    hubble0 : float 
+        The expansion rate of the universe in units of 100 km/s/Mpc. 
+        Default : 0.704.
+    sigma8input : float 
+        The amplitude of the linear power spectrum at z=0 as specified by 
+        the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
+        8 Mpc/h. This is not always stored in the dataset and should be
+        checked by hand.
+        Default : 0.86.
+    primordial_index : float
+        This is the index of the mass power spectrum before modification by 
+        the transfer function. A value of 1 corresponds to the scale-free 
+        primordial spectrum. This is not always stored in the dataset and
+        should be checked by hand.
+        Default : 1.0.
+    this_redshift : float 
+        The current redshift. 
+        Default : 0.
+    log_mass_min : float 
+        The log10 of the mass of the minimum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 5.
+        Default : None.
+    log_mass_max : float 
+        The log10 of the mass of the maximum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 15.
+        Default : None.
+    num_sigma_bins : float
+        The number of bins (points) to use for the calculation of the 
+        analytic mass function. 
+        Default : 360.
+    fitting_function : int
+        Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+        3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
+        Default : 4.
+
+    Examples
+    --------
+
+    This creates the halo mass function for a halo dataset from a simulation
+    and the analytic mass function at the same redshift as the dataset,
+    using as many cosmological parameters as can be pulled from the dataset.
+
+    >>> halos_ds = load("rockstar_halos/halo_0.0.bin")
+    >>> hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
+    >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
+
+    This creates only the analytic halo mass function for a simulation
+    dataset, with default values for cosmological parameters not stored in
+    the dataset.
+
+    >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> hmf = HaloMassFcn(simulation_ds=ds)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
+    
+    This creates the analytic mass function for an arbitrary set of 
+    cosmological parameters, without either a simulation or halo dataset.
+
+    >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+                          omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
+                          log_mass_min=5, log_mass_max=9)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
     """
-    Initalize a HaloMassFcn object to analyze the distribution of halos as 
-    a function of mass.
-    :param ds (str): The loaded simulation dataset.
-    Default=None.
-    :param halos_ds (str): The loaded halo dataset.
-    Default=None.
-    :param make_analytic (bool): Are we going to calculate an analytic mass
-    function, True for yes, False for no.
-    Default=False.
-    :param omega_matter0 (float): The fraction of the universe made up of
-    matter (dark and baryonic). Default=None.
-    :param omega_lambda0 (float): The fraction of the universe made up of
-    dark energy. Default=None.
-    :param omega_baryon0 (float): The fraction of the universe made up of
-    ordinary baryonic matter. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.05.
-    :param hubble0 (float): The expansion rate of the universe in units of
-    100 km/s/Mpc. Default=None.
-    :param sigma8input (float): The amplitude of the linear power
-    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-    in a top-hat sphere of radius 8 Mpc/h. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.86.
-    :param primoridal_index (float): This is the index of the mass power
-    spectrum before modification by the transfer function. A value of 1
-    corresponds to the scale-free primordial spectrum. This should match
-    the value used to make the initial conditions using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=1.0.
-    :param this_redshift (float): The current redshift. Default=None.
-    :param log_mass_min (float): The log10 of the mass of the minimum of the
-    halo mass range. Default=None.
-    :param log_mass_max (float): The log10 of the mass of the maximum of the
-    halo mass range. Default=None.
-    :param num_sigma_bins (float): The number of bins (points) to use for
-    the calculations and generated fit. Default=360.
-    :param fitting_function (int): Which fitting function to use.
-    1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-    5 = Tinker
-    Default=4.
-    """
-    def __init__(self, ds=None, halos_ds=None, make_analytic=False, omega_matter0=None, 
-    omega_lambda0=None, omega_baryon0=0.05, hubble0=None, sigma8input=0.86, 
-    primordial_index=1.0, this_redshift=None, log_mass_min=None, log_mass_max=None, 
-    num_sigma_bins=360, fitting_function=4):
+    def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=False, 
+    omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, 
+    sigma8input=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
+    log_mass_max=None, num_sigma_bins=360, fitting_function=4):
         ParallelAnalysisInterface.__init__(self)
-        self.ds = ds
+        self.simulation_ds = simulation_ds
         self.halos_ds = halos_ds
         self.omega_matter0 = omega_matter0
         self.omega_lambda0 = omega_lambda0
@@ -89,36 +170,49 @@
         self.num_sigma_bins = num_sigma_bins
         self.fitting_function = fitting_function
         self.make_analytic = make_analytic
-
+        self.make_simulated = False
         """
         If we want to make an analytic mass function, grab what we can from either the 
         halo file or the data set, and make sure that the user supplied everything else
         that is needed.
         """
-        if make_analytic == True:
-            # First try to get it from the ds
-            if ds is not None:
-                self.omega_matter0 = self.ds.omega_matter
-                self.omega_lambda0 = self.ds.omega_lambda
-                self.hubble0 = self.ds.hubble_constant
-                self.this_redshift = self.ds.current_redshift
-            # If we can't do that, try to get it from the halos_ds
-            if ds is None and halos_ds is not None:
+        # If we don't have any datasets, make the analytic function with user values
+        if simulation_ds is None and halos_ds is None:
+            self.make_analytic = True
+            # Set a reasonable mass min and max if none were provided
+            if log_mass_min is None:
+                self.log_mass_min = 5
+            if log_mass_max is None:
+                self.log_mass_max = 15
+
+        # If we are given a simulation dataset, make the analytic mass function
+        if simulation_ds is not None:
+            self.make_analytic = True
+
+        # If we're making the analytic function...
+        if self.make_analytic:
+            # Try to set cosmological parameters from the simulation dataset
+            if simulation_ds is not None:
+                self.omega_matter0 = self.simulation_ds.omega_matter
+                self.omega_lambda0 = self.simulation_ds.omega_lambda
+                self.hubble0 = self.simulation_ds.hubble_constant
+                self.this_redshift = self.simulation_ds.current_redshift
+                # Set a reasonable mass min and max if none were provided
+                if log_mass_min is None:
+                    self.log_mass_min = 5
+                if log_mass_max is None:
+                    self.log_mass_max = 15
+            # If we have a halo dataset but not a simulation dataset, use that instead
+            if simulation_ds is None and halos_ds is not None:
                 self.omega_matter0 = self.halos_ds.omega_matter
                 self.omega_lambda0 = self.halos_ds.omega_lambda
                 self.hubble0 = self.halos_ds.hubble_constant
                 self.this_redshift = self.halos_ds.current_redshift
-            # Check that all the parameters for the analytic function have been set
-            if self.omega_matter0 == None or self.omega_lambda0 == None or \
-            self.hubble0 == None or self.this_redshift == None or \
-            self.log_mass_min == None or self.log_mass_max == None:            
-                mylog.error("All of these parameters need to be set:")
-                mylog.error("[omega_matter0, omega_lambda0, \
-hubble0, this_redshift, log_mass_min, log_mass_max]")
-                mylog.error("[%s,%s,%s,%s,%s,%s]" % (self.omega_matter0,\
-                self.omega_lambda0, self.hubble0, self.this_redshift,\
-                self.log_mass_min, self.log_mass_max))
-                return None
+                # If the user didn't specify mass min and max, set them from the halos
+                if log_mass_min is None:
+                    self.set_mass_from_halos("min_mass")
+                if log_mass_max is None:
+                    self.set_mass_from_halos("max_mass")
             # Do the calculations.
             self.sigmaM()
             self.dndm()
@@ -127,10 +221,23 @@
         If a halo file has been supplied, make a mass function for the simulated halos.
         """
         if halos_ds is not None:
+            # Used to check if a simulated halo mass function exists to write out
+            self.make_simulated = True
+            # Calculate the simulated halo mass function
             self.create_sim_hmf()
 
     """
+    If we're making an analytic fit and have a halo dataset, but don't have log_mass_min 
+    or log_mass_max from the user, set it from the range of halo masses.
+    """
+    def set_mass_from_halos(self, which_limit):
+        data_source = self.halos_ds.all_data()
+        if which_limit == "min_mass":
+            self.log_mass_min = int(np.log10(np.amin(data_source['ParticleMassMsun'])))
+        if which_limit == "max_mass":
+            self.log_mass_max = int(np.log10(np.amax(data_source['ParticleMassMsun'])))+1
+    
+    """
     Here's where we create the halo mass functions from simulated halos
     """
     def create_sim_hmf(self):
@@ -139,12 +246,11 @@
         masses_sim = np.sort(data_source['ParticleMassMsun'])
         # Determine the size of the simulation volume in comoving Mpc**3
         sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')).prod()
-        # Get rid of the densities that correspond to repeated halo masses
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)
-        # We don't want repeated halo masses, and the uniques indices tell us which 
-        # densities are representative.
+        # We don't want repeated halo masses, and the unique indices tell us which values 
+        # correspond to distinct halo masses.
         self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
-        # Now make this an actual number density
+        # Now make this an actual number density of halos as a function of mass.
         self.n_cumulative_sim = n_cumulative_sim[unique_indices]/sim_volume
         # masses_sim and n_cumulative_sim are now set, but remember that the log10 quantities
         # are what is usually plotted for a halo mass function.
@@ -153,39 +259,53 @@
         """
         Writes out the halo mass functions to file(s) with prefix *prefix*.
         """
-        # First the analytic file.
-        if self.make_analytic==True and analytic:
-            fitname = prefix + '-analytic.dat'
-            fp = self.comm.write_on_root(fitname)
-            line = \
-            """#Columns:
+        # First the analytic file; check that the analytic fit exists and was requested
+        if analytic:
+            if self.make_analytic:
+                fitname = prefix + '-analytic.dat'
+                fp = self.comm.write_on_root(fitname)
+                line = \
+                """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
 #3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
 #4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
-            fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dn_M_z[i], self.nofmz_cum[i])
                 fp.write(line)
-            fp.close()
-        if self.make_simulated==True and simulated:
-            haloname = prefix + '-simulated.dat'
-            fp = self.comm.write_on_root(haloname)
-            line = \
-            """#Columns:
+                for i in xrange(self.logmassarray.size - 1):
+                    line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
+                    self.dn_M_z[i], self.nofmz_cum[i])
+                    fp.write(line)
+                fp.close()
+            # If the analytic halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The analytic halo mass function was not created and cannot be written \
+out! Specify its creation with HaloMassFcn(make_analytic=True, other_args) \
+when creating the HaloMassFcn object.")
+        # Write out the simulated mass function if it exists and was requested
+        if simulated:
+            if self.make_simulated:
+                haloname = prefix + '-simulated.dat'
+                fp = self.comm.write_on_root(haloname)
+                line = \
+                """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
 #3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
-            fp.write(line)
-            for i in xrange(self.masses_sim.size - 1):
-                line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
-                self.masses_sim[i]/self.hubble0,
-                self.n_cumulative_sim[i])
                 fp.write(line)
-            fp.close()
+                for i in xrange(self.masses_sim.size - 1):
+                    line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
+                    self.masses_sim[i]/self.hubble0,
+                    self.n_cumulative_sim[i])
+                    fp.write(line)
+                fp.close()
+            # If the simulated halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The simulated halo mass function was not created and cannot be written \
+out! Specify its creation by providing a loaded halo dataset with \
+HaloMassFcn(halos_ds=loaded_halo_dataset, other_args) when creating \
+the HaloMassFcn object.")
 
     def sigmaM(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/44d16296b607/
Changeset:   44d16296b607
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-01 21:13:14
Summary:     merged
Affected #:  32 files

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 doc/source/examining/loading_data.rst
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -367,16 +367,11 @@
 yt also supports loading Tipsy data.  Many of its characteristics are similar
 to how Gadget data is loaded; specifically, it shares its definition of
 indexing and mesh-identification with that described in
-:ref:`particle-indexing-criteria`.  However, unlike Gadget, the Tipsy frontend
-has not yet implemented header specifications, field specifications, or
-particle type specifications.  *These are all excellent projects for new
-contributors!*
-
+:ref:`particle-indexing-criteria`.  
 
 .. code-block:: python
 
-    ds = TipsyDataset("./halo1e11_run1.00400", endian="<",
-                           field_dtypes = {"Coordinates": "d"})
+    ds = load("./halo1e11_run1.00400")
 
 .. _specifying-cosmology-tipsy:
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -119,6 +119,9 @@
     ImageArray, particle_filter, create_profile, \
     Profile1D, Profile2D, Profile3D
 
+# For backwards compatibility
+TimeSeriesData = deprecated_class(DatasetSeries)
+
 from yt.frontends.api import _frontend_container
 frontends = _frontend_container()
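
The TimeSeriesData alias above is a backwards-compatibility shim. A minimal
sketch of the intent, assuming deprecated_class returns a wrapper that issues
a deprecation warning and otherwise behaves like DatasetSeries (the output
pattern below is hypothetical):

>>> ts = TimeSeriesData("DD????/DD????")  # warns, then acts as a DatasetSeries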
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -26,7 +26,6 @@
 from yt.utilities.cosmology import \
      Cosmology
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    only_on_root, \
     parallel_objects, \
     parallel_root_only
 from yt.visualization.image_writer import \

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -25,7 +25,6 @@
      HaloProfiler
 from yt.convenience import load
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    only_on_root, \
     parallel_objects, \
     parallel_root_only
 from yt.utilities.physical_constants import \

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -33,11 +33,9 @@
 from yt import units
 from yt.units.yt_array import YTQuantity
 import h5py
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-except ImportError:
-    pass
+from yt.frontends.fits.data_structures import ap
+pyfits = ap.pyfits
+pywcs = ap.pywcs
 
 comm = communication_system.communicators[-1]
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/analysis_modules/photon_simulator/spectral_models.py
--- a/yt/analysis_modules/photon_simulator/spectral_models.py
+++ b/yt/analysis_modules/photon_simulator/spectral_models.py
@@ -17,13 +17,14 @@
 from yt import units
 import h5py
 try:
-    import astropy.io.fits as pyfits
     import xspec
     from scipy.integrate import cumtrapz
     from scipy import stats        
 except ImportError:
     pass
-    
+from yt.frontends.fits.data_structures import ap
+pyfits = ap.pyfits
+
 from yt.utilities.physical_constants import hcgs, clight, erg_per_keV, amu_cgs
 
 hc = (hcgs*clight).in_units("keV*angstrom")

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -759,6 +759,7 @@
         self.field_data = YTFieldData()
         self.weight_field = weight_field
         self.field_units = {}
+        ParallelAnalysisInterface.__init__(self, comm=data_source.comm)
 
     def add_fields(self, fields):
         fields = ensure_list(fields)
@@ -792,7 +793,9 @@
     def _finalize_storage(self, fields, temp_storage):
         # We use our main comm here
         # This also will fill _field_data
-        # FIXME: Add parallelism and combining std stuff
+        temp_storage.values = self.comm.mpi_allreduce(temp_storage.values, op="sum", dtype="float64")
+        temp_storage.weight_values = self.comm.mpi_allreduce(temp_storage.weight_values, op="sum", dtype="float64")
+        temp_storage.used = self.comm.mpi_allreduce(temp_storage.used, op="sum", dtype="bool")
         blank = ~temp_storage.used
         self.used = temp_storage.used
         if self.weight_field is not None:
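
The _finalize_storage change above resolves the old FIXME: each processor's
partial bin accumulations are combined with sum reductions so that every rank
ends up with the global profile. A generic mpi4py illustration of that
reduction (yt's communicator wraps this machinery):

>>> from mpi4py import MPI
>>> import numpy as np
>>> comm = MPI.COMM_WORLD
>>> local = np.zeros(8)                        # this rank's partial bin sums
>>> total = comm.allreduce(local, op=MPI.SUM)  # global sums on every rank

The boolean "used" array is reduced the same way, so a bin is effectively
marked used if any processor touched it.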

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -279,7 +279,10 @@
                 self, dataset_type=self.dataset_type)
             # Now we do things that we need an instantiated index for
             # ...first off, we create our field_info now.
+            oldsettings = np.geterr()
+            np.seterr(all='ignore')
             self.create_field_info()
+            np.seterr(**oldsettings)
         return self._instantiated_index
     
     _index_proxy = None
@@ -361,11 +364,16 @@
         # No string lookups here, we need an actual union.
         f = self.particle_fields_by_type
         fields = set_intersection([f[s] for s in union
-                                   if s in self.particle_types_raw])
+                                   if s in self.particle_types_raw
+                                   and len(f[s]) > 0])
         for field in fields:
             units = set([])
             for s in union:
-                units.add(self.field_units.get((s, field), ""))
+                # First we check our existing fields for units
+                funits = self._get_field_info(s, field).units
+                # Then we override with field_units settings.
+                funits = self.field_units.get((s, field), funits)
+                units.add(funits)
             if len(units) == 1:
                 self.field_units[union.name, field] = list(units)[0]
         self.particle_types += (union.name,)

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/fields/field_detector.py
--- a/yt/fields/field_detector.py
+++ b/yt/fields/field_detector.py
@@ -188,6 +188,8 @@
             return rv
         elif param == "fof_groups":
             return None
+        elif param == "mu":
+            return 1.0
         else:
             return 0.0
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -66,8 +66,9 @@
     def setup_fluid_fields(self):
         pass
 
-    def setup_particle_fields(self, ptype):
+    def setup_particle_fields(self, ptype, ftype='gas', num_neighbors=64 ):
         for f, (units, aliases, dn) in sorted(self.known_particle_fields):
+            units = self.pf.field_units.get((ptype, f), units)
             self.add_output_field((ptype, f),
                 units = units, particle_type = True, display_name = dn)
             if (ptype, f) not in self.field_list:
@@ -99,7 +100,9 @@
             self.add_output_field(field, 
                                   units = self.pf.field_units.get(field, ""),
                                   particle_type = True)
-        self.setup_smoothed_fields(ptype)
+        self.setup_smoothed_fields(ptype, 
+                                   num_neighbors=num_neighbors,
+                                   ftype=ftype)
 
     def setup_smoothed_fields(self, ptype, num_neighbors = 64, ftype = "gas"):
         # We can in principle compute this, but it is not yet implemented.

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -59,34 +59,46 @@
             * data[ftype,'density']
     return _density
 
-def add_species_field_by_density(registry, ftype, species):
+def add_species_field_by_density(registry, ftype, species, 
+                                 particle_type = False):
     """
     This takes a field registry, a fluid type, and a species name and then
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_density" already exists and refers to mass density.
     """
     registry.add_field((ftype, "%s_fraction" % species), 
-                        function = _create_fraction_func(ftype, species),
-                        units = "")
+                       function = _create_fraction_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "")
+
     registry.add_field((ftype, "%s_mass" % species),
-                        function = _create_mass_func(ftype, species),
-                        units = "g")
+                       function = _create_mass_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g")
+
     registry.add_field((ftype, "%s_number_density" % species),
-                        function = _create_number_density_func(ftype, species),
-                        units = "cm**-3")
+                       function = _create_number_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "cm**-3")
 
-def add_species_field_by_fraction(registry, ftype, species):
+def add_species_field_by_fraction(registry, ftype, species, 
+                                  particle_type = False):
     """
     This takes a field registry, a fluid type, and a species name and then
     adds the other fluids based on that.  This assumes that the field
     "SPECIES_fraction" already exists and refers to mass fraction.
     """
     registry.add_field((ftype, "%s_density" % species), 
-                        function = _create_density_func(ftype, species),
-                        units = "g/cm**3")
+                       function = _create_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g/cm**3")
+
     registry.add_field((ftype, "%s_mass" % species),
-                        function = _create_mass_func(ftype, species),
-                        units = "g")
+                       function = _create_mass_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "g")
+
     registry.add_field((ftype, "%s_number_density" % species),
-                        function = _create_number_density_func(ftype, species),
-                        units = "cm**-3")
+                       function = _create_number_density_func(ftype, species),
+                       particle_type = particle_type,
+                       units = "cm**-3")
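
To make the new particle_type keyword concrete, here is a sketch of how a
frontend field container might register per-particle species fields,
mirroring the OWLS usage later in this changeset (registry stands in for a
FieldInfoContainer instance):

>>> for s in ("H", "He", "C"):
...     add_species_field_by_fraction(registry, "PartType0", s,
...                                   particle_type=True)

Each call derives the X_density, X_mass, and X_number_density fields on
("PartType0", ...) from the existing X_fraction field.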

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -90,8 +90,7 @@
 
     def _setup_dx(self):
         # has already been read in and stored in index
-        my_ind = self.id - self._id_offset
-        self.dds = self.index.level_dds[self.Level,:]
+        self.dds = self.index.pf.arr(self.index.level_dds[self.Level, :], 'code_length')
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -532,7 +532,7 @@
         self.dataset_type = dataset_type
         self.float_type = 'float64'
         self.parameter_file = weakref.proxy(pf) # for _obtain_enzo
-        self.float_type = self.enzo.index_information["GridLeftEdge"].dtype
+        self.float_type = self.enzo.hierarchy_information["GridLeftEdge"].dtype
         self.directory = os.getcwd()
         GridIndex.__init__(self, pf, dataset_type)
 
@@ -540,12 +540,12 @@
         pass
 
     def _count_grids(self):
-        self.num_grids = self.enzo.index_information["GridDimensions"].shape[0]
+        self.num_grids = self.enzo.hierarchy_information["GridDimensions"].shape[0]
 
     def _parse_index(self):
         self._copy_index_structure()
         mylog.debug("Copying reverse tree")
-        reverse_tree = self.enzo.index_information["GridParentIDs"].ravel().tolist()
+        reverse_tree = self.enzo.hierarchy_information["GridParentIDs"].ravel().tolist()
         # Initial setup:
         mylog.debug("Reconstructing parent-child relationships")
         grids = []
@@ -574,14 +574,14 @@
 
     def _copy_index_structure(self):
         # Dimensions are important!
-        self.grid_dimensions[:] = self.enzo.index_information["GridEndIndices"][:]
-        self.grid_dimensions -= self.enzo.index_information["GridStartIndices"][:]
+        self.grid_dimensions[:] = self.enzo.hierarchy_information["GridEndIndices"][:]
+        self.grid_dimensions -= self.enzo.hierarchy_information["GridStartIndices"][:]
         self.grid_dimensions += 1
-        self.grid_left_edge[:] = self.enzo.index_information["GridLeftEdge"][:]
-        self.grid_right_edge[:] = self.enzo.index_information["GridRightEdge"][:]
-        self.grid_levels[:] = self.enzo.index_information["GridLevels"][:]
-        self.grid_procs = self.enzo.index_information["GridProcs"].copy()
-        self.grid_particle_count[:] = self.enzo.index_information["GridNumberOfParticles"][:]
+        self.grid_left_edge[:] = self.enzo.hierarchy_information["GridLeftEdge"][:]
+        self.grid_right_edge[:] = self.enzo.hierarchy_information["GridRightEdge"][:]
+        self.grid_levels[:] = self.enzo.hierarchy_information["GridLevels"][:]
+        self.grid_procs = self.enzo.hierarchy_information["GridProcs"].copy()
+        self.grid_particle_count[:] = self.enzo.hierarchy_information["GridNumberOfParticles"][:]
 
     def save_data(self, *args, **kwargs):
         pass
@@ -829,12 +829,11 @@
         else:
             if "LengthUnits" in self.parameters:
                 length_unit = self.parameters["LengthUnits"]
-                mass_unit = self.parameters["MassUnits"]
+                mass_unit = self.parameters["DensityUnits"] * length_unit**3
                 time_unit = self.parameters["TimeUnits"]
             else:
                 mylog.warning("Setting 1.0 in code units to be 1.0 cm")
                 mylog.warning("Setting 1.0 in code units to be 1.0 s")
-                mylog.warning("Setting 1.0 in code units to be 1.0 g")
                 length_unit = mass_unit = time_unit = 1.0
 
             self.length_unit = self.quan(length_unit, "cm")
@@ -899,6 +898,7 @@
         return obj
 
     def __init__(self, parameter_override=None, conversion_override=None):
+        self.fluid_types += ("enzo",)
         if parameter_override is None: parameter_override = {}
         self._parameter_override = parameter_override
         if conversion_override is None: conversion_override = {}

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -233,37 +233,8 @@
                       slice(ghost_zones,-ghost_zones))
         BaseIOHandler.__init__(self, pf)
 
-    def _read_data_set(self, grid, field):
-        if grid.id not in self.grids_in_memory:
-            mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys())
-            raise KeyError
-        tr = self.grids_in_memory[grid.id][field]
-        # If it's particles, we copy.
-        if len(tr.shape) == 1: return tr.copy()
-        # New in-place unit conversion breaks if we don't copy first
-        return tr.swapaxes(0,2)[self.my_slice].copy()
-        # We don't do this, because we currently do not interpolate
-        coef1 = max((grid.Time - t1)/(grid.Time - t2), 0.0)
-        coef2 = 1.0 - coef1
-        t1 = enzo.yt_parameter_file["InitialTime"]
-        t2 = enzo.index_information["GridOldTimes"][grid.id]
-        return (coef1*self.grids_in_memory[grid.id][field] + \
-                coef2*self.old_grids_in_memory[grid.id][field])\
-                [self.my_slice]
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
     def _read_field_names(self, grid):
-        return self.grids_in_memory[grid.id].keys()
-
-    def _read_data_slice(self, grid, field, axis, coord):
-        sl = [slice(3,-3), slice(3,-3), slice(3,-3)]
-        sl[axis] = slice(coord + 3, coord + 4)
-        sl = tuple(reversed(sl))
-        tr = self.grids_in_memory[grid.id][field][sl].swapaxes(0,2)
-        # In-place unit conversion requires we return a copy
-        return tr.copy()
+        return [("enzo", field) for field in self.grids_in_memory[grid.id].keys()]
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         rv = {}
@@ -292,13 +263,10 @@
         for chunk in chunks:
             for g in chunk.objs:
                 if g.id not in self.grids_in_memory: continue
-
-                data = np.empty(g.ActiveDimensions[::-1], dtype="float64")
-                data_view = data.swapaxes(0,2)
                 for field in fields:
                     ftype, fname = field
-                    data_view = self.grids_in_memory[g.id][fname]
-                    nd = g.select(selector, data_view, rv[field], ind)
+                    data_view = self.grids_in_memory[g.id][fname][self.my_slice]
+                    ind += g.select(selector, data_view, rv[field], ind)
         return rv
 
     def _read_particle_coords(self, chunks, ptf):
@@ -333,10 +301,6 @@
                             data = data * g.dds.prod(dtype="f8")
                         yield (ptype, field), data[mask]
 
-    @property
-    def _read_exception(self):
-        return KeyError
-
 class IOHandlerPacked2D(IOHandlerPackedHDF5):
 
     _dataset_type = "enzo_packed_2d"

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/fits/data_structures.py
--- a/yt/frontends/fits/data_structures.py
+++ b/yt/frontends/fits/data_structures.py
@@ -10,13 +10,8 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-try:
-    import astropy.io.fits as pyfits
-    import astropy.wcs as pywcs
-except ImportError:
-    pass
-
 import stat
+import types
 import numpy as np
 import weakref
 import warnings
@@ -39,6 +34,37 @@
 from yt.utilities.decompose import \
     decompose_array, get_psize
 
+class astropy_imports:
+    _pyfits = None
+    @property
+    def pyfits(self):
+        if self._pyfits is None:
+            import astropy.io.fits as pyfits
+            self.log
+            self._pyfits = pyfits
+        return self._pyfits
+
+    _pywcs = None
+    @property
+    def pywcs(self):
+        if self._pywcs is None:
+            import astropy.wcs as pywcs
+            self.log
+            self._pywcs = pywcs
+        return self._pywcs
+
+    _log = None
+    @property
+    def log(self):
+        if self._log is None:
+            from astropy import log
+            if log.exception_logging_enabled():
+                log.disable_exception_logging()
+            self._log = log
+        return self._log
+
+ap = astropy_imports()
+
 angle_units = ["deg","arcsec","arcmin","mas"]
 all_units = angle_units + mpc_conversion.keys()
 
@@ -53,11 +79,11 @@
 
     def __repr__(self):
         return "FITSGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
-    
+
 class FITSHierarchy(GridIndex):
 
     grid = FITSGrid
-    
+
     def __init__(self,pf,dataset_type='fits'):
         self.dataset_type = dataset_type
         self.field_indexes = {}
@@ -77,10 +103,10 @@
         for h in self._handle[self.parameter_file.first_image:]:
             if h.is_image:
                 self.field_list.append(("fits", h.name.lower()))
-                        
+
     def _count_grids(self):
         self.num_grids = self.pf.nprocs
-                
+
     def _parse_index(self):
         f = self._handle # shortcut
         pf = self.parameter_file # shortcut
@@ -99,12 +125,12 @@
             self.grid_left_edge[0,:] = pf.domain_left_edge
             self.grid_right_edge[0,:] = pf.domain_right_edge
             self.grid_dimensions[0] = pf.domain_dimensions
-        
+
         self.grid_levels.flat[:] = 0
         self.grids = np.empty(self.num_grids, dtype='object')
         for i in xrange(self.num_grids):
             self.grids[i] = self.grid(i, self, self.grid_levels[i,0])
-        
+
     def _populate_grid_objects(self):
         for i in xrange(self.num_grids):
             self.grids[i]._prepare_grid()
@@ -113,7 +139,7 @@
 
     def _setup_derived_fields(self):
         super(FITSHierarchy, self)._setup_derived_fields()
-        [self.parameter_file.conversion_factors[field] 
+        [self.parameter_file.conversion_factors[field]
          for field in self.field_list]
         for field in self.field_list:
             if field not in self.derived_field_list:
@@ -123,8 +149,8 @@
             f = self.parameter_file.field_info[field]
             if f._function.func_name == "_TranslationFunc":
                 # Translating an already-converted field
-                self.parameter_file.conversion_factors[field] = 1.0 
-                
+                self.parameter_file.conversion_factors[field] = 1.0
+
     def _setup_data_io(self):
         self.io = io_registry[self.dataset_type](self.parameter_file)
 
@@ -133,7 +159,7 @@
     _field_info_class = FITSFieldInfo
     _dataset_type = "fits"
     _handle = None
-    
+
     def __init__(self, filename, dataset_type='fits',
                  primary_header = None,
                  sky_conversion = None,
@@ -143,24 +169,24 @@
         self.fluid_types += ("fits",)
         self.mask_nans = mask_nans
         self.nprocs = nprocs
-        if isinstance(filename, pyfits.HDUList):
+        if isinstance(filename, ap.pyfits.HDUList):
             self._handle = filename
             fname = filename.filename()
         else:
-            self._handle = pyfits.open(filename)
+            self._handle = ap.pyfits.open(filename)
             fname = filename
         for i, h in enumerate(self._handle):
             if h.is_image and h.data is not None:
                 self.first_image = i
                 break
-        
+
         if primary_header is None:
             self.primary_header = self._handle[self.first_image].header
         else:
             self.primary_header = primary_header
         self.shape = self._handle[self.first_image].shape
 
-        self.wcs = pywcs.WCS(header=self.primary_header)
+        self.wcs = ap.pywcs.WCS(header=self.primary_header)
 
         self.file_unit = None
         for i, unit in enumerate(self.wcs.wcs.cunit):
@@ -178,10 +204,11 @@
             self.new_unit = self.file_unit
             self.pixel_scale = self.wcs.wcs.cdelt[idx]
 
+        self.refine_by = 2
+
         Dataset.__init__(self, fname, dataset_type)
         self.storage_filename = storage_filename
-            
-        self.refine_by = 2
+
         # For plotting to APLpy
         self.hdu_list = self._handle
 
@@ -199,7 +226,7 @@
         self.length_unit = self.quan(length_factor,length_unit)
         self.mass_unit = self.quan(1.0, "g")
         self.time_unit = self.quan(1.0, "s")
-        self.velocity_unit = self.quan(1.0, "cm/s")        
+        self.velocity_unit = self.quan(1.0, "cm/s")
 
     def _parse_parameter_file(self):
         self.unique_identifier = \
@@ -217,14 +244,14 @@
         if self.dimensionality == 2:
             self.domain_dimensions = np.append(self.domain_dimensions,
                                                [int(1)])
-            
+
         self.domain_left_edge = np.array([0.5]*3)
         self.domain_right_edge = np.array([float(dim)+0.5 for dim in self.domain_dimensions])
 
         if self.dimensionality == 2:
             self.domain_left_edge[-1] = 0.5
             self.domain_right_edge[-1] = 1.5
-            
+
         # Get the simulation time
         try:
             self.current_time = self.parameters["time"]
@@ -232,7 +259,7 @@
             mylog.warning("Cannot find time")
             self.current_time = 0.0
             pass
-        
+
         # For now we'll ignore these
         self.periodicity = (False,)*3
         self.current_redshift = self.omega_lambda = self.omega_matter = \
@@ -243,8 +270,15 @@
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
+        if isinstance(args[0], types.StringTypes):
+            ext = args[0].rsplit(".", 1)[-1]
+            if ext.upper() == "GZ":
+                # We don't know for sure that there will be > 1
+                ext = args[0].rsplit(".", 1)[0].rsplit(".", 1)[-1]
+            if ext.upper() not in ("FITS", "FTS"):
+                return False
         try:
-            if isinstance(args[0], pyfits.HDUList):
+            if args[0].__class__.__name__ == "HDUList":
                 for h in args[0]:
                     if h.is_image and h.data is not None:
                         return True
@@ -253,7 +287,7 @@
         try:
             with warnings.catch_warnings():
                 warnings.filterwarnings('ignore', category=UserWarning, append=True)
-                fileh = pyfits.open(args[0])
+                fileh = ap.pyfits.open(args[0])
             for h in fileh:
                 if h.is_image and h.data is not None:
                     fileh.close()
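
The astropy_imports container defined above defers the astropy import from
module load time to first attribute access, replacing the old module-level
try/except. A minimal sketch of the effect (the FITS filename is
hypothetical):

>>> from yt.frontends.fits.data_structures import ap
>>> hdus = ap.pyfits.open("my_file.fits")  # astropy.io.fits imported on first use

Until that first access, yt imports cleanly on systems without astropy; only
code paths that actually touch ap.pyfits or ap.pywcs require it.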

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/fits/io.py
--- a/yt/frontends/fits/io.py
+++ b/yt/frontends/fits/io.py
@@ -11,10 +11,6 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
-try:
-    import astropy.io.fits as pyfits
-except ImportError:
-    pass
 
 from yt.utilities.math_utils import prec_accum
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/halo_catalogs/api.py
--- a/yt/frontends/halo_catalogs/api.py
+++ b/yt/frontends/halo_catalogs/api.py
@@ -14,12 +14,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-from halo_catalog.api import \
+from .halo_catalog.api import \
      HaloCatalogDataset, \
      IOHandlerHaloCatalogHDF5, \
      HaloCatalogFieldInfo
 
-from rockstar.api import \
+from .rockstar.api import \
       RockstarDataset, \
       IOHandlerRockstarBinary, \
       RockstarFieldInfo

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -209,7 +209,10 @@
         if "length" in unit_base:
             length_unit = unit_base["length"]
         elif "UnitLength_in_cm" in unit_base:
-            length_unit = (unit_base["UnitLength_in_cm"], "cm")
+            if self.cosmological_simulation == 0:
+                length_unit = (unit_base["UnitLength_in_cm"], "cm")
+            else:
+                length_unit = (unit_base["UnitLength_in_cm"], "cmcm/h")
         else:
             raise RuntimeError
         length_unit = _fix_unit_ordering(length_unit)
@@ -229,7 +232,10 @@
         if "mass" in unit_base:
             mass_unit = unit_base["mass"]
         elif "UnitMass_in_g" in unit_base:
-            mass_unit = (unit_base["UnitMass_in_g"], "g")
+            if self.cosmological_simulation == 0:
+                mass_unit = (unit_base["UnitMass_in_g"], "g")
+            else:
+                mass_unit = (unit_base["UnitMass_in_g"], "g/h")
         else:
             # Sane default
             mass_unit = (1.0, "1e10*Msun/h")
@@ -286,6 +292,8 @@
     _particle_mass_name = "Mass"
     _field_info_class = OWLSFieldInfo
 
+
+
     def _parse_parameter_file(self):
         handle = h5py.File(self.parameter_filename, mode="r")
         hvals = {}
@@ -351,6 +359,7 @@
         assert file_id == 0
         super(TipsyFile, self).__init__(pf, io, filename, file_id)
         io._create_dtypes(self)
+        io._update_domain(self)  # Check automatically what the domain size is
 
 
 class TipsyDataset(ParticleDataset):
@@ -369,23 +378,25 @@
 
     def __init__(self, filename, dataset_type="tipsy",
                  field_dtypes=None,
-                 domain_left_edge=None,
-                 domain_right_edge=None,
                  unit_base=None,
                  cosmology_parameters=None,
                  parameter_file=None,
                  n_ref=64, over_refine_factor=1):
         self.n_ref = n_ref
         self.over_refine_factor = over_refine_factor
-        self.endian = self._validate_header(filename)[1]
+        success, self.endian = self._validate_header(filename)
+        if not success:
+            print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
+            print "%s != (%s == %s + %s + %s)" % (
+                self.parameters['nbodies'],
+                tot,
+                self.parameters['nsph'],
+                self.parameters['ndark'],
+                self.parameters['nstar'])
+            print "Often this can be fixed by changing the 'endian' parameter."
+            print "This defaults to '>' but may in fact be '<'."
+            raise RuntimeError
         self.storage_filename = None
-        if domain_left_edge is None:
-            domain_left_edge = np.zeros(3, "float64") - 0.5
-        if domain_right_edge is None:
-            domain_right_edge = np.zeros(3, "float64") + 0.5
-
-        self.domain_left_edge = np.array(domain_left_edge, dtype="float64")
-        self.domain_right_edge = np.array(domain_right_edge, dtype="float64")
 
         # My understanding is that dtypes are set on a field by field basis,
         # not on a (particle type, field) basis
@@ -421,6 +432,7 @@
         self.refine_by = 2
         self.parameters["HydroMethod"] = "sph"
 
+
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[stat.ST_CTIME])
 
@@ -457,21 +469,13 @@
         self.domain_dimensions = np.ones(3, "int32") * nz
         if self.parameters.get('bPeriodic', True):
             self.periodicity = (True, True, True)
+            # If we are periodic, that sets our domain width to either 1 or dPeriod.
+            self.domain_left_edge = np.zeros(3, "float64") - 0.5*self.parameters.get('dPeriod', 1)
+            self.domain_right_edge = np.zeros(3, "float64") + 0.5*self.parameters.get('dPeriod', 1)
         else:
             self.periodicity = (False, False, False)
-        tot = sum(self.parameters[ptype] for ptype
-                  in ('nsph', 'ndark', 'nstar'))
-        if tot != self.parameters['nbodies']:
-            print "SOMETHING HAS GONE WRONG.  NBODIES != SUM PARTICLES."
-            print "%s != (%s == %s + %s + %s)" % (
-                self.parameters['nbodies'],
-                tot,
-                self.parameters['nsph'],
-                self.parameters['ndark'],
-                self.parameters['nstar'])
-            print "Often this can be fixed by changing the 'endian' parameter."
-            print "This defaults to '>' but may in fact be '<'."
-            raise RuntimeError
+            self.domain_left_edge = None
+            self.domain_right_edge = None
         if self.parameters.get('bComove', False):
             self.cosmological_simulation = 1
             cosm = self._cosmology_parameters or {}
@@ -516,11 +520,20 @@
 
     @staticmethod
     def _validate_header(filename):
+        '''
+        This method automatically detects whether the tipsy file is big/little endian
+        and is not corrupt/invalid.  It returns a tuple of (Valid, endianswap) where
+        Valid is a boolean that is true if the file is a tipsy file, and endianswap is 
+        the endianness character '>' or '<'.
+        '''
         try:
             f = open(filename,'rb')
         except:
             return False, 1
-        fs = len(f.read())
+        try:
+            fs = len(f.read())
+        except IOError:
+            return False, 1
         f.seek(0)
         #Read in the header
         t, n, ndim, ng, nd, ns = struct.unpack("<diiiii", f.read(28))
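
For reference, the header read here is a little-endian double (the simulation
time) followed by five ints (nbodies, ndim, nsph, ndark, nstar), 28 bytes in
total:

>>> import struct
>>> struct.calcsize("<diiiii")
28

If the little-endian unpack yields implausible values, the file is presumably
big-endian and gets re-read with ">diiiii"; that is the detection this method
performs.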

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -14,17 +14,27 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import os
 import numpy as np
+import owls_ion_tables as oit
 
 from yt.funcs import *
+
 from yt.fields.field_info_container import \
     FieldInfoContainer
+
 from .definitions import \
     gadget_ptypes, \
     ghdf5_ptypes
 
-from yt.fields.species_fields import add_species_field_by_fraction
+from yt.config import ytcfg
+from yt.utilities.physical_constants import mh
+from yt.fields.species_fields import \
+    add_species_field_by_fraction, \
+    add_species_field_by_density
 
+from yt.fields.particle_fields import \
+    add_volume_weighted_smoothed_field
 
 
 # Here are helper functions for things like vector fields and so on.
@@ -90,24 +100,32 @@
 
 class OWLSFieldInfo(SPHFieldInfo):
 
-    _species_fractions = ['H_fraction', 'He_fraction', 'C_fraction',
-                          'N_fraction', 'O_fraction', 'Ne_fraction',
-                          'Mg_fraction', 'Si_fraction', 'Fe_fraction']
+    _ions = ("c1", "c2", "c3", "c4", "c5", "c6",
+             "fe2", "fe17", "h1", "he1", "he2", "mg1", "mg2", "n2", 
+             "n3", "n4", "n5", "n6", "n7", "ne8", "ne9", "ne10", "o1", 
+             "o6", "o7", "o8", "si2", "si3", "si4", "si13")
 
-    # override
-    #--------------------------------------------------------------
+    _elements = ("H", "He", "C", "N", "O", "Ne", "Mg", "Si", "Fe")
+
+    _num_neighbors = 48
+
+    _add_elements = ("PartType0", "PartType4")
+
+    _add_ions = ("PartType0")
+
+
     def __init__(self, *args, **kwargs):
         
         new_particle_fields = (
-            ('Hydrogen', ('', ['H_fraction'], None)),
-            ('Helium', ('', ['He_fraction'], None)),
-            ('Carbon', ('', ['C_fraction'], None)),
-            ('Nitrogen', ('', ['N_fraction'], None)),
-            ('Oxygen', ('', ['O_fraction'], None)),
-            ('Neon', ('', ['Ne_fraction'], None)),
-            ('Magnesium', ('', ['Mg_fraction'], None)),
-            ('Silicon', ('', ['Si_fraction'], None)),
-            ('Iron', ('', ['Fe_fraction'], None))
+            ("Hydrogen", ("", ["H_fraction"], None)),
+            ("Helium", ("", ["He_fraction"], None)),
+            ("Carbon", ("", ["C_fraction"], None)),
+            ("Nitrogen", ("", ["N_fraction"], None)),
+            ("Oxygen", ("", ["O_fraction"], None)),
+            ("Neon", ("", ["Ne_fraction"], None)),
+            ("Magnesium", ("", ["Mg_fraction"], None)),
+            ("Silicon", ("", ["Si_fraction"], None)),
+            ("Iron", ("", ["Fe_fraction"], None))
             )
 
         self.known_particle_fields += new_particle_fields
@@ -115,9 +133,255 @@
         super(OWLSFieldInfo,self).__init__( *args, **kwargs )
 
 
+
+    def setup_particle_fields(self, ptype):
+        """ additional particle fields derived from those in snapshot.
+        we also need to add the smoothed fields here b/c setup_fluid_fields
+        is called before setup_particle_fields. """ 
+
+        smoothed_suffixes = ("_number_density", "_density", "_mass")
+
+
+
+        # we add particle element fields for stars and gas
+        #-----------------------------------------------------
+        if ptype in self._add_elements:
+
+
+            # this adds the particle element fields
+            # X_density, X_mass, and X_number_density
+            # where X is an item of self._elements.
+            # X_fraction are defined in snapshot
+            #-----------------------------------------------
+            for s in self._elements:
+                add_species_field_by_fraction(self, ptype, s,
+                                              particle_type=True)
+
+        # this needs to be called after the call to 
+        # add_species_field_by_fraction for some reason ...
+        # not sure why yet. 
+        #-------------------------------------------------------
+        if ptype == 'PartType0':
+            ftype='gas'
+        elif ptype == 'PartType1':
+            ftype='dm'
+        elif ptype == 'PartType4':
+            ftype='star'
+        elif ptype == 'all':
+            ftype='all'
         
+        super(OWLSFieldInfo,self).setup_particle_fields(
+            ptype, num_neighbors=self._num_neighbors, ftype=ftype)
+
+
+        # and now we add the smoothed versions for PartType0
+        #-----------------------------------------------------
+        if ptype == 'PartType0':
+
+            loaded = []
+            for s in self._elements:
+                for sfx in smoothed_suffixes:
+                    fname = s + sfx
+                    fn = add_volume_weighted_smoothed_field( 
+                        ptype, "particle_position", "particle_mass",
+                        "smoothing_length", "density", fname, self,
+                        self._num_neighbors)
+                    loaded += fn
+
+                    self.alias(("gas", fname), fn[0])
+
+            self._show_field_errors += loaded
+            self.find_dependencies(loaded)
+
+
+            # we only add ion fields for gas.  this takes some 
+            # time as the ion abundances have to be interpolated
+            # from cloudy tables (optically thin)
+            #-----------------------------------------------------
+    
+
+            # this defines the ion density on particles
+            # X_density for all items in self._ions
+            #-----------------------------------------------
+            self.setup_gas_ion_density_particle_fields( ptype )
+
+            # this adds the rest of the ion particle fields
+            # X_fraction, X_mass, X_number_density
+            #-----------------------------------------------
+            for ion in self._ions:
+
+                # construct yt name for ion
+                #---------------------------------------------------
+                if ion[0:2].isalpha():
+                    symbol = ion[0:2].capitalize()
+                    roman = int(ion[2:])
+                else:
+                    symbol = ion[0:1].capitalize()
+                    roman = int(ion[1:])
+
+                pstr = "_p" + str(roman-1)
+                yt_ion = symbol + pstr
+
+                # add particle field
+                #---------------------------------------------------
+                add_species_field_by_density(self, ptype, yt_ion,
+                                             particle_type=True)
+
+
+            # add smoothed ion fields
+            #-----------------------------------------------
+            for ion in self._ions:
+
+                # construct yt name for ion
+                #---------------------------------------------------
+                if ion[0:2].isalpha():
+                    symbol = ion[0:2].capitalize()
+                    roman = int(ion[2:])
+                else:
+                    symbol = ion[0:1].capitalize()
+                    roman = int(ion[1:])
+
+                pstr = "_p" + str(roman-1)
+                yt_ion = symbol + pstr
+
+                loaded = []
+                for sfx in smoothed_suffixes:
+                    fname = yt_ion + sfx
+                    fn = add_volume_weighted_smoothed_field( 
+                        ptype, "particle_position", "particle_mass",
+                        "smoothing_length", "density", fname, self,
+                        self._num_neighbors)
+                    loaded += fn
+
+                    self.alias(("gas", fname), fn[0])
+
+                self._show_field_errors += loaded
+                self.find_dependencies(loaded)
+
+
+
+    def setup_gas_ion_density_particle_fields( self, ptype ):
+        """ Sets up particle fields for gas ion densities. """ 
+
+        # loop over all ions and make fields
+        #----------------------------------------------
+        for ion in self._ions:
+
+            # construct yt name for ion
+            #---------------------------------------------------
+            if ion[0:2].isalpha():
+                symbol = ion[0:2].capitalize()
+                roman = int(ion[2:])
+            else:
+                symbol = ion[0:1].capitalize()
+                roman = int(ion[1:])
+
+            pstr = "_p" + str(roman-1)
+            yt_ion = symbol + pstr
+            ftype = ptype
+
+            # add ion density field for particles
+            #---------------------------------------------------
+            fname = yt_ion + '_density'
+            dens_func = self._create_ion_density_func( ftype, ion )
+            self.add_field( (ftype, fname),
+                            function = dens_func, 
+                            units="g/cm**3",
+                            particle_type=True )            
+            self._show_field_errors.append( (ftype,fname) )
+
+
+
+        
+    def _create_ion_density_func( self, ftype, ion ):
+        """ returns a function that calculates the ion density of a particle. 
+        """ 
+
+        def _ion_density(field, data):
+
+            # get element symbol from ion string. ion string will 
+            # be a member of the tuple _ions (i.e. si13)
+            #--------------------------------------------------------
+            if ion[0:2].isalpha():
+                symbol = ion[0:2].capitalize()
+            else:
+                symbol = ion[0:1].capitalize()
+
+            # mass fraction for the element
+            #--------------------------------------------------------
+            m_frac = data[ftype, symbol+"_fraction"]
+
+            # get nH and T for lookup
+            #--------------------------------------------------------
+            log_nH = np.log10( data["PartType0", "H_number_density"] )
+            log_T = np.log10( data["PartType0", "Temperature"] )
+
+            # get name of owls_ion_file for given ion
+            #--------------------------------------------------------
+            owls_ion_path = self._get_owls_ion_data_dir()
+            fname = os.path.join( owls_ion_path, ion+".hdf5" )
+
+            # create ionization table for this redshift
+            #--------------------------------------------------------
+            itab = oit.IonTableOWLS( fname )
+            itab.set_iz( data.pf.current_redshift )
+
+            # find ion balance using log nH and log T
+            #--------------------------------------------------------
+            i_frac = itab.interp( log_nH, log_T )
+            return data[ftype,"Density"] * m_frac * i_frac 
+        
+        return _ion_density
+
+
+
+
+
+    # this function sets up the X_mass, X_density, X_fraction, and
+    # X_number_density fields where X is the name of an OWLS element.
+    #-------------------------------------------------------------
     def setup_fluid_fields(self):
-        # here species_name is "H", "He", etc
-        for s in self._species_fractions:
-            species_name = s.split('_')[0]
-            add_species_field_by_fraction(self, "gas", species_name)
+
+        return
+
+
+
+    # this function returns the owls_ion_data directory. if it doesn't
+    # exist it will download the data from http://yt-project.org/data
+    #-------------------------------------------------------------
+    def _get_owls_ion_data_dir(self):
+
+        txt = "Attempting to download ~ 30 Mb of owls ion data from %s to %s."
+        data_file = "owls_ion_data.tar.gz"
+        data_url = "http://yt-project.org/data"
+
+        # get test_data_dir from yt config (ytcfg)
+        #----------------------------------------------
+        tdir = ytcfg.get("yt","test_data_dir")
+
+        # set download destination to tdir or ./ if tdir isn't defined
+        #----------------------------------------------
+        if tdir == "/does/not/exist":
+            data_dir = "./"
+        else:
+            data_dir = tdir            
+
+
+        # check for owls_ion_data directory in data_dir
+        # if not there download the tarball and untar it
+        #----------------------------------------------
+        owls_ion_path = os.path.join( data_dir, "owls_ion_data" )
+
+        if not os.path.exists(owls_ion_path):
+            mylog.info(txt % (data_url, data_dir))                    
+            fname = data_dir + "/" + data_file
+            fn = download_file(os.path.join(data_url, data_file), fname)
+
+            cmnd = "cd " + data_dir + "; " + "tar xf " + data_file
+            os.system(cmnd)
+
+
+        if not os.path.exists(owls_ion_path):
+            raise RuntimeError, "Failed to download owls ion data."
+
+        return owls_ion_path
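
The ion-name translation repeated above maps an OWLS ion tag onto yt's
species convention, in which the _pN suffix denotes an N-times-ionized state.
A worked example:

>>> ion = "si13"
>>> symbol = ion[0:2].capitalize() if ion[0:2].isalpha() else ion[0:1].capitalize()
>>> roman = int(ion[2:]) if ion[0:2].isalpha() else int(ion[1:])
>>> symbol + "_p" + str(roman - 1)
'Si_p12'

So "si13" becomes "Si_p12" and "h1" becomes "H_p0" (neutral hydrogen).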

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -53,7 +53,7 @@
     _vector_fields = ("Coordinates", "Velocity", "Velocities")
     _known_ptypes = ghdf5_ptypes
     _var_mass = None
-    _element_fields = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
+    _element_names = ('Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 
                        'Neon', 'Magnesium', 'Silicon', 'Iron' )
 
 
@@ -110,7 +110,7 @@
                         ind = self._known_ptypes.index(ptype) 
                         data[:] = self.pf["Massarr"][ind]
 
-                    elif field in self._element_fields:
+                    elif field in self._element_names:
                         rfield = 'ElementAbundance/' + field
                         data = g[rfield][:][mask,...]
 
@@ -418,7 +418,8 @@
 
     def _read_aux_fields(self, field, mask, data_file):
         """
-        Read in auxiliary files from gasoline/pkdgrav 
+        Read in auxiliary files from gasoline/pkdgrav.
+        This method will automatically detect the format of the file.
         """
         filename = data_file.filename+'.'+field
         dtype = None
@@ -528,6 +529,41 @@
                     yield (ptype, field), tf.pop(field)
             f.close()
 
+    def _update_domain(self, data_file):
+        '''
+        This method is used to determine the size needed for a box that will 
+        bound the particles.  It simply finds the largest position of the
+        whole set of particles, and sets the domain to +/- that value.
+        '''
+        pf = data_file.pf
+        ind = 0
+        # Check to make sure that the domain hasn't already been set
+        # by the parameter file 
+        if pf.domain_left_edge is not None and pf.domain_right_edge is not None:
+            return
+        with open(data_file.filename, "rb") as f:
+            f.seek(pf._header_offset)
+            for iptype, ptype in enumerate(self._ptypes):
+                # We'll just add the individual types separately
+                count = data_file.total_particles[ptype]
+                if count == 0: continue
+                start, stop = ind, ind + count
+                while ind < stop:
+                    c = min(CHUNKSIZE, stop - ind)
+                    pp = np.fromfile(f, dtype = self._pdtypes[ptype],
+                                     count = c)
+                    for ax in 'xyz':
+                        mi = pp["Coordinates"][ax].min()
+                        ma = pp["Coordinates"][ax].max()
+                        outlier = YTArray(np.max(np.abs((mi,ma))), 'code_length')
+                    if outlier > pf.domain_right_edge or -outlier < pf.domain_left_edge:
+                        pf.domain_left_edge = -outlier
+                        pf.domain_right_edge = outlier
+                    ind += c
+        pf.domain_left_edge = np.ones(3)*pf.domain_left_edge
+        pf.domain_right_edge = np.ones(3)*pf.domain_right_edge
+        pf.domain_width = np.ones(3)*2*pf.domain_right_edge
+
     def _initialize_index(self, data_file, regions):
         pf = data_file.pf
         morton = np.empty(sum(data_file.total_particles.values()),

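The new ``_update_domain`` above symmetrizes the domain around the largest
absolute particle coordinate, reading positions chunk by chunk so the whole
file never sits in memory at once. A minimal sketch of that reduction, with
illustrative names rather than the tipsy reader's actual machinery:

.. code-block:: python

  import numpy as np

  CHUNKSIZE = 2  # tiny for illustration; the reader uses a much larger value

  def bounding_half_width(positions):
      # Largest |coordinate| over all particles, found chunk by chunk.
      outlier = 0.0
      for start in range(0, len(positions), CHUNKSIZE):
          chunk = positions[start:start + CHUNKSIZE]
          outlier = max(outlier, float(np.abs(chunk).max()))
      return outlier

  pos = np.array([[0.1, -0.4, 0.2], [0.3, 0.9, -1.2], [0.0, 0.5, 0.7]])
  half = bounding_half_width(pos)
  left_edge = -half * np.ones(3)   # [-1.2, -1.2, -1.2]
  right_edge = half * np.ones(3)   # [ 1.2,  1.2,  1.2]
  width = right_edge - left_edge   # [ 2.4,  2.4,  2.4]
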
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/sph/owls_ion_tables.py
--- /dev/null
+++ b/yt/frontends/sph/owls_ion_tables.py
@@ -0,0 +1,224 @@
+""" 
+OWLS ion tables
+
+A module to handle the HM01 UV background spectra and ionization data from the
+OWLS photoionization equilibrium lookup tables. 
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import sys
+import h5py
+import numpy as np
+
+
+
+
+def h5rd( fname, path, dtype=None ):
+    """ Read Data. Return a dataset located at <path> in file <fname> as
+    a numpy array. 
+    e.g. h5rd( fname, '/PartType0/Coordinates' ). """
+
+    data = None
+    with h5py.File( fname, 'r' ) as h5f:
+        ds = h5f[path]
+        if dtype is None:
+            dtype = ds.dtype
+        # read once and cast, so a requested dtype is actually honored
+        data = ds.value.astype(dtype)
+    return data
+
+
+
+class IonTableSpectrum:
+
+    """ A class to handle the HM01 spectra in the OWLS ionization tables. """
+
+    def __init__(self, ion_file):
+
+        where = '/header/spectrum/gammahi'
+        self.GH1 = h5rd( ion_file, where ) # GH1[1/s]
+
+        where = '/header/spectrum/logenergy_ryd'
+        self.logryd = h5rd( ion_file, where ) # E[ryd]  
+
+        where = '/header/spectrum/logflux'
+        self.logflux = h5rd( ion_file, where ) # J[ergs/s/Hz/Sr/cm^2] 
+
+        where = '/header/spectrum/redshift'
+        self.z = h5rd( ion_file, where ) # z
+
+
+
+    def return_table_GH1_at_z(self,z):
+
+        # find redshift indices
+        #-----------------------------------------------------------------
+        i_zlo = np.argmin( np.abs( self.z - z ) )
+        if self.z[i_zlo] < z:
+            i_zhi = i_zlo + 1
+        else:
+            i_zhi = i_zlo
+            i_zlo = i_zlo - 1
+    
+        z_frac = (z - self.z[i_zlo]) / (self.z[i_zhi] - self.z[i_zlo])
+   
+        # find GH1 from table
+        #-----------------------------------------------------------------
+        logGH1_all = np.log10( self.GH1 )
+        dlog_GH1 = logGH1_all[i_zhi] - logGH1_all[i_zlo]
+
+        logGH1_table = logGH1_all[i_zlo] + z_frac * dlog_GH1
+        GH1_table = 10.0**logGH1_table
+
+        return GH1_table
+    
+
+
+
+class IonTableOWLS:
+
+    """ A class to handle OWLS ionization tables. """
+
+    DELTA_nH = 0.25
+    DELTA_T = 0.1
+    
+    def __init__(self, ion_file):
+
+        self.ion_file = ion_file
+
+        # ionbal is indexed like [nH, T, z]
+        # nH and T are log quantities
+        #---------------------------------------------------------------
+        self.nH = h5rd( ion_file, '/logd' )         # log nH [cm^-3]
+        self.T = h5rd( ion_file, '/logt' )          # log T [K]
+        self.z = h5rd( ion_file, '/redshift' )      # z
+
+        # read the ionization fractions
+        # linear values stored in file so take log here
+        # ionbal is the ionization balance (i.e. fraction) 
+        #---------------------------------------------------------------
+        self.ionbal = h5rd( ion_file, '/ionbal' ).astype(np.float64)    
+        self.ionbal_orig = self.ionbal.copy()
+
+        ipositive = np.where( self.ionbal > 0.0 )
+        izero = np.where( self.ionbal <= 0.0 )
+        self.ionbal[izero] = self.ionbal[ipositive].min()
+
+        self.ionbal = np.log10( self.ionbal )
+
+
+        # load in background spectrum
+        #---------------------------------------------------------------
+        self.spectrum = IonTableSpectrum( ion_file ) 
+
+        # calculate the spacing along each dimension
+        #---------------------------------------------------------------
+        self.dnH = self.nH[1:] - self.nH[0:-1]
+        self.dT = self.T[1:] - self.T[0:-1]
+        self.dz = self.z[1:] - self.z[0:-1]
+
+        self.order_str = '[log nH, log T, z]'
+
+
+    # sets iz and fz
+    #-----------------------------------------------------
+    def set_iz( self, z ):
+
+        if z <= self.z[0]:
+            self.iz = 0
+            self.fz = 0.0
+        elif z >= self.z[-1]:
+            self.iz = len(self.z) - 2
+            self.fz = 1.0
+        else:
+            for iz in range( len(self.z)-1 ):
+                if z < self.z[iz+1]:
+                    self.iz = iz
+                    self.fz = ( z - self.z[iz] ) / self.dz[iz]
+                    break
+
+        
+
+    # interpolate the table at a fixed redshift for the input
+    # values of nH and T ( input should be log ).  A simple    
+    # tri-linear interpolation is used.  
+    #-----------------------------------------------------
+    def interp( self, nH, T ):
+
+        nH = np.array( nH )
+        T  = np.array( T )
+
+        if nH.size != T.size:
+            raise ValueError("nH and T arrays must be the same size")
+        
+        # field discovery will have nH.size == 1 and T.size == 1
+        # in that case we simply return 1.0
+
+        if nH.size == 1 and T.size == 1:
+            ionfrac = 1.0
+            return ionfrac
+
+
+        # find inH and fnH
+        #-----------------------------------------------------
+        inH = np.int32( ( nH - self.nH[0] ) / self.DELTA_nH )
+        fnH = ( nH - self.nH[inH] ) / self.dnH[inH]
+
+        indx = np.where( inH < 0 )[0]
+        if len(indx) > 0:
+            inH[indx] = 0
+            fnH[indx] = 0.0
+
+        # clamp against the table axis (self.nH), not the input array
+        indx = np.where( inH >= len(self.nH)-1 )[0]
+        if len(indx) > 0:
+            inH[indx] = len(self.nH)-2
+            fnH[indx] = 1.0
+
+
+        # find iT and fT
+        #-----------------------------------------------------
+        iT = np.int32( ( T - self.T[0] ) / self.DELTA_T )
+        fT = ( T - self.T[iT] ) / self.dT[iT]
+
+        indx = np.where( iT < 0 )[0]
+        if len(indx) > 0:
+            iT[indx] = 0
+            fT[indx] = 0.0
+
+        # clamp against the table axis (self.T), not the input array
+        indx = np.where( iT >= len(self.T)-1 )[0]
+        if len(indx) > 0:
+            iT[indx] = len(self.T)-2
+            fT[indx] = 1.0
+
+
+        iz = self.iz
+        fz = self.fz
+                   
+        # calculate interpolated value
+        # use tri-linear interpolation on the log values
+        #-----------------------------------------------------
+
+        ionfrac = self.ionbal[inH,   iT,   iz  ] * (1-fnH) * (1-fT) * (1-fz) + \
+                  self.ionbal[inH+1, iT,   iz  ] * (fnH)   * (1-fT) * (1-fz) + \
+                  self.ionbal[inH,   iT+1, iz  ] * (1-fnH) * (fT)   * (1-fz) + \
+                  self.ionbal[inH,   iT,   iz+1] * (1-fnH) * (1-fT) * (fz)   + \
+                  self.ionbal[inH+1, iT,   iz+1] * (fnH)   * (1-fT) * (fz)   + \
+                  self.ionbal[inH,   iT+1, iz+1] * (1-fnH) * (fT)   * (fz)   + \
+                  self.ionbal[inH+1, iT+1, iz]   * (fnH)   * (fT)   * (1-fz) + \
+                  self.ionbal[inH+1, iT+1, iz+1] * (fnH)   * (fT)   * (fz)
+
+        return 10**ionfrac

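``IonTableOWLS.interp`` above fixes the redshift index and fraction with
``set_iz`` and then tri-linearly interpolates the log ion balance over
(log nH, log T, z), weighting the eight corners of the enclosing cell. The
same weighting on a toy 2x2x2 table (indices and fractions here are assumed,
not read from a real OWLS file):

.. code-block:: python

  import numpy as np

  def trilinear(tab, i, j, k, fx, fy, fz):
      # Weighted sum over the eight corners of cell (i, j, k).
      return (tab[i,   j,   k  ] * (1-fx) * (1-fy) * (1-fz) +
              tab[i+1, j,   k  ] * fx     * (1-fy) * (1-fz) +
              tab[i,   j+1, k  ] * (1-fx) * fy     * (1-fz) +
              tab[i+1, j+1, k  ] * fx     * fy     * (1-fz) +
              tab[i,   j,   k+1] * (1-fx) * (1-fy) * fz +
              tab[i+1, j,   k+1] * fx     * (1-fy) * fz +
              tab[i,   j+1, k+1] * (1-fx) * fy     * fz +
              tab[i+1, j+1, k+1] * fx     * fy     * fz)

  tab = np.arange(8.0).reshape(2, 2, 2)            # toy log ion-balance table
  log_frac = trilinear(tab, 0, 0, 0, 0.5, 0.5, 0.5)
  print(10**log_frac)  # the table stores logs, so exponentiate at the end
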
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -498,6 +498,13 @@
             raise RuntimeError
         new_data[new_field] = data[field]
         field_units[new_field] = field_units.pop(field)
+        known_fields = StreamFieldInfo.known_particle_fields \
+                     + StreamFieldInfo.known_other_fields
+        # If a known field arrives with empty units, drop the entry so the
+        # frontend's default units are not overridden.
+        if any(f[0] == new_field[1] for f in known_fields) and \
+           field_units[new_field] == "":
+            field_units.pop(new_field)
     data = new_data
     return field_units, data
 
@@ -995,7 +1002,7 @@
 
     """
 
-    domain_dimensions = np.ones(3, "int32") * 2
+    domain_dimensions = np.ones(3, "int32") * (1<<over_refine_factor)
     nprocs = 1
     if bbox is None:
         bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')

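The ``1<<over_refine_factor`` change above makes the root grid of a loaded
particle dataset scale as 2^over_refine_factor cells per axis instead of a
hard-coded 2 (assuming the factor's usual default of 1, which reproduces the
old behavior). A one-liner to see the effect:

.. code-block:: python

  import numpy as np

  for over_refine_factor in (1, 2, 3):
      dims = np.ones(3, "int32") * (1 << over_refine_factor)
      print("%d -> %s" % (over_refine_factor, dims))
  # 1 -> [2 2 2], 2 -> [4 4 4], 3 -> [8 8 8]
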
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -52,13 +52,7 @@
             raise NotImplementedError
         rv = {}
         for field in fields:
-            ftype, fname = field
-            try:
-                field_units = self.field_units[fname]
-            except KeyError:
-                field_units = self.field_units[field]
-            rv[field] = self.pf.arr(np.empty(size, dtype="float64"),
-                                    field_units)
+            rv[field] = self.pf.arr(np.empty(size, dtype="float64"))
         ng = sum(len(c.objs) for c in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [f2 for f1, f2 in fields], ng)

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -504,9 +504,8 @@
         cdef OctVisitorData data
         self.setup_data(&data, -1)
         data.oref = 1
-        data.nz = 8
         cdef np.ndarray[np.uint8_t, ndim=1] ref_mask
-        ref_mask = np.zeros(self.nocts * 8, dtype="uint8") - 1
+        ref_mask = np.zeros(self.nocts * data.nz, dtype="uint8") - 1
         cdef void *p[2]
         cdef np.uint8_t ad = int(always_descend)
         p[0] = <void *> &ad
@@ -514,8 +513,6 @@
         data.array = p
         # Enforce partial_coverage here
         self.visit_all_octs(selector, oct_visitors.store_octree, &data, 1)
-        if always_descend:
-            ref_mask = ref_mask[:data.index-1]
         header['octree'] = ref_mask
         return header
 

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -185,6 +185,7 @@
         return
     data.last = o.domain_ind
     if o.children == NULL or o.children[ii] == NULL:
+        # Not refined.
         res = 0
     else:
         res = 1

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -214,6 +214,12 @@
         # Now we visit all our children.  We subtract off sdds for the first
         # pass because we center it on the first cell.
         cdef int iter = 1 - visit_covered # 2 if 1, 1 if 0.
+        # So the order here goes like so.  If visit_covered is 1, which usually
+        # comes from "partial_coverage", we visit the components of a zone even
+        # if it has children.  But in general, the first iteration through, we
+        # visit each cell.  This means that only if visit_covered is true do we
+        # visit potentially covered cells.  The next time through, we visit
+        # child cells.
         while iter < 2:
             spos[0] = pos[0] - sdds[0]/2.0
             for i in range(2):

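The comment added above describes a two-pass visitation: ``iter`` starts at
``1 - visit_covered``, so the ``while iter < 2`` body runs twice when
``visit_covered`` is set (potentially covered cells first, then child cells)
and once otherwise. A toy sketch of just that control flow:

.. code-block:: python

  def n_passes(visit_covered):
      # iter = 1 - visit_covered: 0 gives two passes, 1 gives a single pass.
      it, count = 1 - visit_covered, 0
      while it < 2:
          count += 1
          it += 1
      return count

  print("%d %d" % (n_passes(0), n_passes(1)))  # 1 2
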
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/utilities/fits_image.py
--- a/yt/utilities/fits_image.py
+++ b/yt/utilities/fits_image.py
@@ -14,14 +14,11 @@
 from yt.funcs import mylog, iterable
 from yt.visualization.fixed_resolution import FixedResolutionBuffer
 from yt.data_objects.construction_data_containers import YTCoveringGridBase
+from yt.frontends.fits.data_structures import ap
+pyfits = ap.pyfits
+pywcs = ap.pywcs
 
-try:
-    from astropy.io.fits import HDUList, ImageHDU
-    from astropy import wcs as pywcs
-except ImportError:
-    HDUList = object
-
-class FITSImageBuffer(HDUList):
+class FITSImageBuffer(pyfits.HDUList):
 
     def __init__(self, data, fields=None, units="cm",
                  center=None, scale=None, wcs=None):
@@ -69,7 +66,7 @@
         >>> f_deg.writeto("temp.fits")
         """
         
-        super(HDUList, self).__init__()
+        super(pyfits.HDUList, self).__init__()
 
         if isinstance(fields, basestring): fields = [fields]
             
@@ -96,10 +93,10 @@
             if key not in exclude_fields:
                 mylog.info("Making a FITS image of field %s" % (key))
                 if first:
-                    hdu = PrimaryHDU(np.array(img_data[key]))
+                    hdu = pyfits.PrimaryHDU(np.array(img_data[key]))
                     hdu.name = key
                 else:
-                    hdu = ImageHDU(np.array(img_data[key]), name=key)
+                    hdu = pyfits.ImageHDU(np.array(img_data[key]), name=key)
                 self.append(hdu)
 
         self.dimensionality = len(self[0].data.shape)
@@ -210,7 +207,7 @@
         return FITSImageBuffer(new_buffer, wcs=new_wcs)
 
     def writeto(self, fileobj, **kwargs):
-        HDUList(self).writeto(fileobj, **kwargs)
+        pyfits.HDUList(self).writeto(fileobj, **kwargs)
         
     @property
     def shape(self):

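The fits_image.py changes above route all astropy access through the ``ap``
object from the FITS frontend instead of importing astropy at module scope.
A minimal sketch of that deferred-import pattern (the class and attribute
names here are illustrative, not yt's exact shim):

.. code-block:: python

  class LazyAstropy(object):
      # Defer the astropy import until the attribute is first touched.
      _pyfits = None

      @property
      def pyfits(self):
          if LazyAstropy._pyfits is None:
              import astropy.io.fits as _pyfits
              LazyAstropy._pyfits = _pyfits
          return LazyAstropy._pyfits

  ap = LazyAstropy()
  # ap.pyfits.HDUList()  # astropy is imported here, on first use,
  #                      # not when this module is loaded
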
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -100,6 +100,7 @@
     config.add_extension("origami", 
                 ["yt/utilities/lib/origami.pyx",
                  "yt/utilities/lib/origami_tags.c"],
+                include_dirs=["yt/utilities/lib/"],
                 depends=["yt/utilities/lib/origami_tags.h"])
     config.add_extension("image_utilities", 
                          ["yt/utilities/lib/image_utilities.pyx"],
@@ -129,8 +130,7 @@
                libraries=["m"], 
                extra_compile_args=omp_args,
                extra_link_args=omp_args,
-               depends = ["yt/utilities/lib/VolumeIntegrator.pyx",
-                          "yt/utilities/lib/fp_utils.pxd",
+               depends = ["yt/utilities/lib/fp_utils.pxd",
                           "yt/utilities/lib/kdtree.h",
                           "yt/utilities/lib/FixedInterpolator.h",
                           "yt/utilities/lib/fixed_interpolator.pxd",

diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -13,14 +13,18 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import cPickle
 import cStringIO
 import itertools
 import logging
 import numpy as np
 import sys
+import os
+import traceback
+import types
+from functools import wraps
 
-from yt.funcs import *
+from yt.funcs import \
+    ensure_list, iterable, traceback_writer_hook
 
 from yt.config import ytcfg
 from yt.utilities.definitions import \
@@ -30,8 +34,13 @@
     QuadTree, merge_quadtrees
 from yt.units.yt_array import YTArray
 from yt.units.unit_registry import UnitRegistry
+from yt.utilities.exceptions import YTNoDataInObjectError
+from yt.utilities.logger import ytLogger as mylog
 
-parallel_capable = ytcfg.getboolean("yt", "__parallel")
+# We default to *no* parallelism unless it gets turned on, in which case this
+# will be changed.
+MPI = None
+parallel_capable = False
 
 dtype_names = dict(
         float32 = "MPI.FLOAT",
@@ -48,14 +57,21 @@
 
 # Set up translation table and import things
 
-exe_name = os.path.basename(sys.executable)
 def enable_parallelism():
-    global parallel_capable
-    from mpi4py import MPI
+    global parallel_capable, MPI
+    try:
+        from mpi4py import MPI as _MPI
+    except ImportError:
+        mylog.info("mpi4py was not found. Disabling parallel computation")
+        parallel_capable = False
+        return
+    MPI = _MPI
+    exe_name = os.path.basename(sys.executable)
     parallel_capable = (MPI.COMM_WORLD.size > 1)
     if not parallel_capable: return False
     mylog.info("Global parallel computation enabled: %s / %s",
                MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size)
+    communication_system.push(MPI.COMM_WORLD)
     ytcfg["yt","__global_parallel_rank"] = str(MPI.COMM_WORLD.rank)
     ytcfg["yt","__global_parallel_size"] = str(MPI.COMM_WORLD.size)
     ytcfg["yt","__parallel"] = "True"
@@ -126,7 +142,7 @@
 
     def __iter__(self):
         for obj in self._objs: yield obj
-        
+
 class ParallelObjectIterator(ObjectIterator):
     """
     This takes an object, *pobj*, that implements ParallelAnalysisInterface,
@@ -149,7 +165,7 @@
                                 np.arange(len(self._objs)), self._skip)[self._offset]
             else:
                 self.my_obj_ids = np.arange(len(self._objs))[self._offset::self._skip]
-        
+
     def __iter__(self):
         for gid in self.my_obj_ids:
             yield self._objs[gid]
@@ -298,7 +314,7 @@
         self.ranks = range(self.size)
         self.available_ranks = range(self.size)
         self.workgroups = []
-    
+
     def add_workgroup(self, size=None, ranks=None, name=None):
         if size is None:
             size = len(self.available_ranks)
@@ -309,14 +325,14 @@
         if ranks is None:
             ranks = [self.available_ranks.pop(0) for i in range(size)]
         # Default name to the workgroup number.
-        if name is None: 
+        if name is None:
             name = str(len(self.workgroups))
         group = self.comm.comm.Get_group().Incl(ranks)
         new_comm = self.comm.comm.Create(group)
         if self.comm.rank in ranks:
             communication_system.communicators.append(Communicator(new_comm))
         self.workgroups.append(Workgroup(len(ranks), ranks, new_comm, name))
-    
+
     def free_workgroup(self, workgroup):
         # If you want to actually delete the workgroup you will need to
         # pop it out of the self.workgroups list so you don't have references
@@ -324,7 +340,7 @@
         for i in workgroup.ranks:
             if self.comm.rank == i:
                 communication_system.communicators.pop()
-            self.available_ranks.append(i) 
+            self.available_ranks.append(i)
         self.available_ranks.sort()
 
     def free_all(self):
@@ -427,7 +443,7 @@
                                                storage=storage):
             yield my_obj
         return
-    
+
     if not parallel_capable:
         njobs = 1
     my_communicator = communication_system.communicators[-1]
@@ -507,13 +523,13 @@
     Here is a simple example of a ring loop around a set of integers, with a
     custom dtype.
 
-    >>> dt = numpy.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
+    >>> dt = np.dtype([('x', 'float64'), ('y', 'float64'), ('z', 'float64')])
     >>> def gfunc(o):
-    ...     numpy.random.seed(o)
+    ...     np.random.seed(o)
     ...     rv = np.empty(1000, dtype=dt)
-    ...     rv['x'] = numpy.random.random(1000)
-    ...     rv['y'] = numpy.random.random(1000)
-    ...     rv['z'] = numpy.random.random(1000)
+    ...     rv['x'] = np.random.random(1000)
+    ...     rv['y'] = np.random.random(1000)
+    ...     rv['z'] = np.random.random(1000)
     ...     return rv
     ...
     >>> obj = range(8)
@@ -583,10 +599,7 @@
     communicators = []
 
     def __init__(self):
-        if parallel_capable:
-            self.communicators.append(Communicator(MPI.COMM_WORLD))
-        else:
-            self.communicators.append(Communicator(None))
+        self.communicators.append(Communicator(None))
 
     def push(self, new_comm):
         if not isinstance(new_comm, Communicator):
@@ -724,7 +737,7 @@
             return data
         elif datatype == "list" and op == "cat":
             recv_data = self.comm.allgather(data)
-            # Now flatten into a single list, since this 
+            # Now flatten into a single list, since this
             # returns us a list of lists.
             data = []
             while recv_data:
@@ -777,7 +790,7 @@
             if dtype != data.dtype:
                 data = data.astype(dtype)
             temp = data.copy()
-            self.comm.Allreduce([temp,get_mpi_type(dtype)], 
+            self.comm.Allreduce([temp,get_mpi_type(dtype)],
                                      [data,get_mpi_type(dtype)], op)
             return data
         else:
@@ -884,7 +897,7 @@
         self.comm.Send([buf[0], MPI.INT], dest=target)
         self.comm.Send([buf[1], MPI.DOUBLE], dest=target)
         self.comm.Send([buf[2], MPI.DOUBLE], dest=target)
-        
+
     def recv_quadtree(self, target, tgd, args):
         sizebuf = np.zeros(1, 'int64')
         self.comm.Recv(sizebuf, source=target)
@@ -982,7 +995,7 @@
         if len(send.shape) > 1:
             recv = []
             for i in range(send.shape[0]):
-                recv.append(self.alltoallv_array(send[i,:].copy(), 
+                recv.append(self.alltoallv_array(send[i,:].copy(),
                                                  total_size, offsets, sizes))
             recv = np.array(recv)
             return recv
@@ -1013,9 +1026,6 @@
                 break
 
 communication_system = CommunicationSystem()
-if parallel_capable:
-    ranks = np.arange(MPI.COMM_WORLD.size)
-    communication_system.push_with_ids(ranks)
 
 class ParallelAnalysisInterface(object):
     comm = None
@@ -1067,7 +1077,7 @@
 
     def partition_index_2d(self, axis):
         if not self._distributed:
-           return False, self.index.grid_collection(self.center, 
+           return False, self.index.grid_collection(self.center,
                                                         self.index.grids)
 
         xax, yax = x_dict[axis], y_dict[axis]
@@ -1143,7 +1153,7 @@
         LE, RE = left_edge[:], right_edge[:]
         if not self._distributed:
             raise NotImplemented
-            return LE, RE, re
+            return LE, RE #, re
 
         cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
         mi = self.comm.rank % (self.comm.size / rank_ratio)
@@ -1182,10 +1192,10 @@
 
         cc = MPI.Compute_dims(self.comm.size, 3)
         si = self.comm.size
-        
+
         factors = factor(si)
         xyzfactors = [factor(cc[0]), factor(cc[1]), factor(cc[2])]
-        
+
         # Each entry of cuts is a two element list, that is:
         # [cut dim, number of cuts]
         cuts = []
@@ -1204,7 +1214,7 @@
                     break
                 nextdim = (nextdim + 1) % 3
         return cuts
-    
+
 class GroupOwnership(ParallelAnalysisInterface):
     def __init__(self, items):
         ParallelAnalysisInterface.__init__(self)
@@ -1229,7 +1239,7 @@
             self.pointer += 1
         if self.item is not old_item:
             self.switch()
-            
+
     def dec(self, n = -1):
         old_item = self.item
         if n == -1: n = self.comm.size

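The parallel_analysis_interface changes above make MPI strictly opt-in:
``parallel_capable`` starts out False and mpi4py is imported only inside
``enable_parallelism()``. A stripped-down sketch of that pattern, without
yt's config and communicator bookkeeping:

.. code-block:: python

  MPI = None
  parallel_capable = False

  def enable_parallelism():
      global MPI, parallel_capable
      try:
          from mpi4py import MPI as _MPI
      except ImportError:
          return False  # serial fallback; mpi4py is no longer a hard import
      MPI = _MPI
      parallel_capable = MPI.COMM_WORLD.size > 1
      return parallel_capable

  # Scripts opt in explicitly; importing the module starts no parallelism.
  if enable_parallelism():
      print("rank %d of %d" % (MPI.COMM_WORLD.rank, MPI.COMM_WORLD.size))
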
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/visualization/fixed_resolution.py
--- a/yt/visualization/fixed_resolution.py
+++ b/yt/visualization/fixed_resolution.py
@@ -25,6 +25,8 @@
 import _MPL
 import numpy as np
 import weakref
+import re
+import string
 
 class FixedResolutionBuffer(object):
     r"""
@@ -147,6 +149,41 @@
             if f not in exclude and f[0] not in self.data_source.pf.particle_types:
                 self[f]
 
+
+    def _is_ion( self, fname ):
+        # fields for individual ions carry a "_pN_" ionization-stage tag
+        p = re.compile("_p[0-9]+_")
+        return p.search( fname ) is not None
+
+    def _ion_to_label( self, fname ):
+        pnum2rom = {
+            "0":"I", "1":"II", "2":"III", "3":"IV", "4":"V",
+            "5":"VI", "6":"VII", "7":"VIII", "8":"IX", "9":"X",
+            "10":"XI", "11":"XII", "12":"XIII", "13":"XIV", "14":"XV",
+            "15":"XVI", "16":"XVII", "17":"XVIII", "18":"XIX", "19":"XX"}
+
+        p = re.compile("_p[0-9]+_")
+        m = p.search( fname )
+        if m != None:
+            pstr = m.string[m.start()+1:m.end()-1]
+            segments = fname.split("_")
+            for i,s in enumerate(segments):
+                segments[i] = string.capitalize(s)
+                if s == pstr:
+                    ipstr = i
+            element = segments[ipstr-1]
+            roman = pnum2rom[pstr[1:]] 
+            label = element + '\/' + roman + '\/' + \
+                string.join( segments[ipstr+1:], '\/' ) 
+        else:
+            label = fname
+        return label
+
     def _get_info(self, item):
         info = {}
         ftype, fname = field = self.data_source._determine_fields(item)[0]
@@ -172,8 +209,13 @@
         
         info['label'] = finfo.display_name
         if info['label'] is None:
-            info['label'] = r'$\rm{'+fname+r'}$'
-            info['label'] = r'$\rm{'+fname.replace('_','\/').title()+r'}$'
+            if self._is_ion( fname ):
+                fname = self._ion_to_label( fname )
+                info['label'] = r'$\rm{'+fname.replace('_','\/')+r'}$'
+            else:
+                info['label'] = r'$\rm{'+fname.replace('_','\/').title()+r'}$'
         elif info['label'].find('$') == -1:
             info['label'] = info['label'].replace(' ','\/')
             info['label'] = r'$\rm{'+info['label']+r'}$'
@@ -280,16 +322,11 @@
             requested.
         """
 
-        try:
-            import astropy.io.fits as pyfits
-        except:
-            mylog.error("You don't have AstroPy installed!")
-            raise ImportError
         from yt.utilities.fits_image import FITSImageBuffer
 
         extra_fields = ['x','y','z','px','py','pz','pdx','pdy','pdz','weight_field']
         if fields is None: 
-            fields = [field for field in self.data_source.fields 
+            fields = [field[-1] for field in self.data_source.field_data
                       if field not in extra_fields]
 
         fib = FITSImageBuffer(self, fields=fields, units=units)

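``_ion_to_label`` above turns field names like ``O_p5_number_density`` into
plot labels such as "O VI Number Density": the ``_pN_`` tag counts electrons
removed, so ``p5`` is the sixth ionization state, Roman numeral VI. A compact
re-implementation sketch of the same mapping (not the exact yt code):

.. code-block:: python

  import re

  PNUM2ROM = {0: "I", 1: "II", 2: "III", 3: "IV", 4: "V",
              5: "VI", 6: "VII", 7: "VIII", 8: "IX", 9: "X"}

  def ion_label(fname):
      m = re.search(r"_p([0-9]+)_", fname)
      if m is None:
          return fname.replace("_", " ").title()
      element = fname[:m.start()].capitalize()
      rest = fname[m.end():].replace("_", " ").title()
      return "%s %s %s" % (element, PNUM2ROM[int(m.group(1))], rest)

  print(ion_label("o_p5_number_density"))  # O VI Number Density
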
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -93,6 +93,10 @@
         item = self.data_source._determine_fields(item)[0]
         return dict.__getitem__(self, item)
 
+    def __contains__(self, item):
+        item = self.data_source._determine_fields(item)[0]
+        return dict.__contains__(self, item)
+
     def __init__(self, data_source, *args):
         self.data_source = data_source
         return dict.__init__(self, args)

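The new ``__contains__`` above makes ``field in plots`` resolve field names
through ``_determine_fields`` exactly as ``__getitem__`` already does, so
membership tests and lookups agree. A toy sketch of the pattern, with
``str.lower`` standing in for yt's field resolution:

.. code-block:: python

  class NormalizedDict(dict):
      # Normalize keys on every access so lookup and membership agree.
      def _norm(self, key):
          return key.lower()
      def __setitem__(self, key, value):
          dict.__setitem__(self, self._norm(key), value)
      def __getitem__(self, key):
          return dict.__getitem__(self, self._norm(key))
      def __contains__(self, key):
          return dict.__contains__(self, self._norm(key))

  d = NormalizedDict()
  d["Density"] = 1
  print("density" in d)  # True: __contains__ now matches __getitem__
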
diff -r 52835cd5c85523dffc152a7e45e9c92b5a1f5b37 -r 44d16296b6079041900721374a98e3061884b945 yt/visualization/tests/test_callbacks.py
--- a/yt/visualization/tests/test_callbacks.py
+++ b/yt/visualization/tests/test_callbacks.py
@@ -163,5 +163,4 @@
         p.annotate_grids(alpha=0.7, min_pix=10, min_pix_ids=30,
             draw_ids=True, periodic=False, min_level=2,
             max_level=3, cmap="gist_stern")
-        p.save()
-
+        p.save(prefix)


https://bitbucket.org/yt_analysis/yt/commits/b987d7ce492c/
Changeset:   b987d7ce492c
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-02 17:17:24
Summary:     clarified comment on rho_crit
Affected #:  1 file

diff -r 44d16296b6079041900721374a98e3061884b945 -r b987d7ce492c3159014b31277d943a9917a26c6c yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -79,7 +79,8 @@
 # flux
 jansky_cgs = 1.0e-23
 # Cosmological constants
-# Calculated with H = 100 km/s/Mpc
+# Calculated with H = 100 km/s/Mpc, value given in units of h^2 g cm^-3
+# Multiply by h^2 to get the critical density in units of g cm^-3
 rho_crit_g_cm3_h2 = 1.8788e-29
 
 # Misc. Approximations

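A quick sanity check on the clarified comment above: multiplying the stored
value by h^2 recovers the physical critical density.

.. code-block:: python

  rho_crit_g_cm3_h2 = 1.8788e-29   # g cm^-3 h^2, for H = 100 km/s/Mpc
  h = 0.704                        # e.g. the hubble0 default used elsewhere in this series
  print(rho_crit_g_cm3_h2 * h**2)  # ~9.31e-30 g cm^-3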

https://bitbucket.org/yt_analysis/yt/commits/96fa9b9a6595/
Changeset:   96fa9b9a6595
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-02 23:53:07
Summary:     merged
Affected #:  3 files

diff -r 2ef55a7a3a8b8ba66fd8f68c31b744b91ff361ee -r 96fa9b9a6595fd5cd7e1f40be8757906510d9ef9 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -6,7 +6,7 @@
 .. versionadded:: 1.6
 
 The Halo Mass Function extension is capable of outputting the halo mass function
-for a collection haloes (input), and/or an analytical fit over a given mass range
+for a collection of halos (input), and/or an analytical fit over a given mass range
 for a set of specified cosmological parameters.
 
 This extension is based on code generously provided by Brian O'Shea.
@@ -14,7 +14,7 @@
 General Overview
 ----------------
 
-In order to run this extension on a dataset, the haloes need to be located
+In order to run this extension on a dataset, the halos need to be located
 (using HOP, FOF or Parallel HOP, see :ref:`halo_finding`),
 and their virial masses determined using the
 HaloProfiler (see :ref:`halo_profiling`).
@@ -24,7 +24,32 @@
 cosmological parameters will need to be input as well. These initial parameters
 are not stored in an Enzo dataset, so they must be set by hand.
 An analytical fit can be found without referencing a particular dataset or
-set of haloes, but all the cosmological parameters need to be set by hand.
+set of halos, but all the cosmological parameters need to be set by hand.
+
+Basic Halo Mass Function Creation
+---------------------------------
+
+Creating the halo mass function of simulated halos requires only loading a halo
+dataset and passing this to HaloMassFcn().
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+  halos_ds = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=halos_ds)
+
+If an analytic fit is also desired, additionally set ``make_analytic=True``,
+changing the last line above to the following:
+
+.. code-block:: python
+
+  hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
+
+``hmf`` is a HaloMassFcn object with arrays holding the simulated halo masses
+and the cumulative halo number density hanging off of it.
+
 
 Analytical Fits
 ---------------
@@ -45,38 +70,41 @@
 The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which
 appears to fit HOP threshold=80.0 fairly well.
 
-Analyze Simulated Haloes
+Analyze Simulated Halos
 ------------------------
 
-If an analytical fit is not needed, it is simple to analyze a set of 
-haloes. The ``halo_file`` needs to be specified, and
-``fitting_function`` does not need to be specified.
-``num_sigma_bins`` is how many bins the halo masses are sorted into.
-The default is 360. ``mass_column`` is the zero-indexed column of the
-``halo_file`` file that contains the halo masses. The default is 5, which
-corresponds to the sixth column of data in the file.
+To create the halo mass function of halos found in a simulation, only the 
+loaded halo dataset needs to be specified.
 
 .. code-block:: python
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", num_sigma_bins=200,
-  mass_column=5)
+  halos_ds = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=halos_ds)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves
-the halo mass function to a text file. By default, both the halo analysis (``haloes``) and
-fit (``fit``) are written to (different) text files, but they can be turned on or off
-explicitly. ``prefix`` sets the name used for the file(s). The haloes file
-is named ``prefix-haloes.dat``, and the fit file ``prefix-fit.dat``.
+This will calculate the cumulative halo mass function for the halo dataset and
+create ``hmf``, a HaloMassFcn object with the arrays ``masses_sim`` and
+``n_cumulative_sim`` hanging off of it. These arrays hold the halo masses in
+units of solar mass and the cumulative number density of halos above that mass
+per comoving Mpc^3, respectively.
+
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the
+halo mass function to a text file. By default, both the mass function of the
+simulated halos (``simulated``) and the analytic fit (``analytic``) are written
+to text files, but they can be turned on or off explicitly. ``prefix`` sets the
+name used for the file(s). The simulated-halos file is named
+``prefix-simulated.dat``, and the analytic fit file ``prefix-analytic.dat``.
 Continued from above, invoking this command:
 
 .. code-block:: python
 
-  hmf.write_out(prefix='hmf', fit=False, haloes=True)
+  hmf.write_out(prefix='hmf', analytic=False, simulated=True)
 
-will save the haloes data to a file named ``hmf-haloes.dat``. The contents
-of the ``-haloes.dat`` file is three columns:
+will save the simulated halo data to a file named ``hmf-simulated.dat``. The
+contents of the ``-simulated.dat`` file are three columns:
 
   1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
   2. mass (Msolar/h) for this bin.
@@ -85,9 +113,43 @@
 Analytical Halo Mass Function Fit
 ---------------------------------
 
+To create an analytic mass function, several additional parameters which are 
+not necessarily attached to the dataset will need to be provided. If a halo 
+or simulation dataset is provided, the values that can be extracted directly 
+from it will be used. The following parameters will need to be set:
+
+``make_analytic=True``
+
+:math:`\Omega_{m}`, ``omega_matter0``, Default=0.2726
+
+:math:`\Omega_{\Lambda}`, ``omega_lambda0``, Default=0.7274
+
+:math:`\Omega_{b}`, ``omega_baryon0``, Default=0.0456
+
+:math:`h`, ``hubble0``, Default=0.704
+
+:math:`\sigma_8`, ``sigma8input``, Default=0.86
+
+primordial index, ``primordial_index``, Default=1.0
+
+redshift, ``this_redshift``, Default=None
+
+log of the minimum halo mass, :math:`\log_{10}M_{min}`, ``log_mass_min``, Default=None
+
+log of the maximum halo mass, :math:`\log_{10}M_{max}`, ``log_mass_max``, Default=None
+
+Providing a simulation or halo dataset will generally set ``omega_matter0``,
+``omega_lambda0``, ``hubble0``, and ``this_redshift``. If ``log_mass_min`` or
+``log_mass_max`` are not specified but a halo dataset has been provided, the
+range of halo masses will be used to set these parameters.
+
 When an analytical fit is desired, in nearly all cases several cosmological
 parameters will need to be specified by hand. These parameters are not
-stored with Enzo datasets. In the case where both the haloes and an analytical
+stored with Enzo datasets. In the case where both the halos and an analytical
 fit are desired, the analysis is instantiated as below.
 ``sigma8input``, ``primordial_index`` and ``omega_baryon0`` should be set to
 the same values as
@@ -108,7 +170,7 @@
   fitting_function=4)
   hmf.write_out(prefix='hmf')
 
-Both the ``-haloes.dat`` and ``-fit.dat`` files are written to disk.
+Both the ``-halos.dat`` and ``-fit.dat`` files are written to disk.
 The contents of the ``-fit.dat`` file is four columns:
 
   1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
@@ -116,12 +178,12 @@
   3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3) in this bin.
   4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
 
-Below is an example of the output for both the haloes and the (Warren)
+Below is an example of the output for both the halos and the (Warren)
 analytical fit, for three datasets. The black lines are the calculated
 halo mass functions, and the blue lines the analytical fit set by initial
 conditions. This simulation shows typical behavior, in that there are too
-few small haloes compared to the fit due to lack of mass and gravity resolution
-for small haloes. But at higher mass ranges, the simulated haloes are quite close
+few small halos compared to the fit due to lack of mass and gravity resolution
+for small halos. But at higher mass ranges, the simulated halos are quite close
 to the analytical fit.
 
 .. image:: _images/halo_mass_function.png
@@ -145,7 +207,7 @@
   omega_baryon0=0.06, hubble0=.7, this_redshift=0., log_mass_min=8.,
   log_mass_max=13., sigma8input=0.9, primordial_index=1.,
   fitting_function=1)
-  hmf.write_out(prefix="hmf-press-schechter", fit=True, haloes=False)
+  hmf.write_out(prefix="hmf-press-schechter", fit=True, halos=False)
 
 It is possible to access the output of the halo mass function without saving
 to disk. The content is stored in arrays hanging off the ``HaloMassFcn``

diff -r 2ef55a7a3a8b8ba66fd8f68c31b744b91ff361ee -r 96fa9b9a6595fd5cd7e1f40be8757906510d9ef9 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -21,62 +21,143 @@
     ParallelDummy, \
     ParallelAnalysisInterface, \
     parallel_blocking_call
-from yt.utilities.physical_constants import \
-    cm_per_mpc, \
-    mass_sun_cgs
 from yt.utilities.physical_ratios import \
     rho_crit_g_cm3_h2
+from yt.utilities.logger import ytLogger as mylog
 
 class HaloMassFcn(ParallelAnalysisInterface):
+    r"""
+    Initialize a HaloMassFcn object to analyze the distribution of halos as
+    a function of mass.  A mass function can be created for a set of
+    simulated halos, an analytic fit can be created for a redshift and
+    set of cosmological parameters, or both can be created.
+
+    Provided with a halo dataset object, this will make the mass function
+    for simulated halos.  Providing a simulation dataset will set as many
+    of the cosmological parameters as possible for the creation of the
+    analytic mass function.
+
+    The HaloMassFcn object has arrays hanging off of it containing the mass
+    function information.
+
+    masses_sim : Array 
+        Halo masses from simulated halos.
+    n_cumulative_sim : Array
+        Number density of halos with mass greater than the corresponding 
+        mass in masses_sim (simulated).
+    massarray : Array
+        Masses used for the generation of the analytic mass function.
+    nofmz_cum : Array
+        Number density of halos with mass greater than the corresponding
+        mass in massarray (analytic).
+
+    The HaloMassFcn object also has a convenience function write_out() that
+    will write out the data to disk.
+
+    Creating a HaloMassFcn object with no arguments will produce an analytic
+    mass function at redshift = 0 using default cosmological values.
+
+    Parameters
+    ----------
+    simulation_ds : Simulation dataset object
+        The loaded simulation dataset, used to set cosmological parameters.
+        Default : None.
+    halos_ds : Halo dataset object
+        The halos from a simulation to be used for creation of the 
+        halo mass function in the simulation.
+        Default : None.
+    make_analytic : bool 
+        Whether or not to calculate the analytic mass function to go with 
+        the simulated halo mass function.  Automatically set to true if a 
+        simulation dataset is provided.
+        Default : False.
+    omega_matter0 : float
+        The fraction of the universe made up of matter (dark and baryonic). 
+        Default : 0.2726.
+    omega_lambda0 : float
+        The fraction of the universe made up of dark energy. 
+        Default : 0.7274.
+    omega_baryon0 : float
+        The fraction of the universe made up of baryonic matter. This is not
+        always stored in the dataset and should be checked by hand.
+        Default : 0.0456.
+    hubble0 : float 
+        The expansion rate of the universe in units of 100 km/s/Mpc. 
+        Default : 0.704.
+    sigma8input : float 
+        The amplitude of the linear power spectrum at z=0 as specified by 
+        the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
+        8 Mpc/h. This is not always stored in the dataset and should be
+        checked by hand.
+        Default : 0.86.
+    primordial_index : float
+        This is the index of the mass power spectrum before modification by
+        the transfer function. A value of 1 corresponds to the scale-free
+        primordial spectrum. This is not always stored in the dataset and
+        should be checked by hand.
+        Default : 1.0.
+    this_redshift : float 
+        The current redshift. 
+        Default : 0.
+    log_mass_min : float 
+        The log10 of the mass of the minimum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 5.
+        Default : None.
+    log_mass_max : float 
+        The log10 of the mass of the maximum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 15.
+        Default : None.
+    num_sigma_bins : float
+        The number of bins (points) to use for the calculation of the 
+        analytic mass function. 
+        Default : 360.
+    fitting_function : int
+        Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+        3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
+        Default : 4.
+
+    Examples
+    --------
+
+    This creates the halo mass function for a halo dataset from a simulation
+    and the analytic mass function at the same redshift as the dataset,
+    using as many cosmological parameters as can be pulled from the dataset.
+
+    >>> halos_ds = load("rockstar_halos/halo_0.0.bin")
+    >>> hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
+    >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
+
+    This creates only the analytic halo mass function for a simulation
+    dataset, with default values for cosmological parameters not stored in
+    the dataset.
+
+    >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> hmf = HaloMassFcn(ds=ds)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
+    
+    This creates the analytic mass function for an arbitrary set of 
+    cosmological parameters, without either a simulation or halo dataset.
+
+    >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+                          omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
+                          log_mass_min=5, log_mass_max=9)
+    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.savefig("mass_function.png")
     """
-    Initalize a HaloMassFcn object to analyze the distribution of haloes
-    as a function of mass.
-    :param halo_file (str): The filename of the output of the Halo Profiler.
-    Default=None.
-    :param omega_matter0 (float): The fraction of the universe made up of
-    matter (dark and baryonic). Default=None.
-    :param omega_lambda0 (float): The fraction of the universe made up of
-    dark energy. Default=None.
-    :param omega_baryon0 (float): The fraction of the universe made up of
-    ordinary baryonic matter. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.05.
-    :param hubble0 (float): The expansion rate of the universe in units of
-    100 km/s/Mpc. Default=None.
-    :param sigma8input (float): The amplitude of the linear power
-    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-    in a top-hat sphere of radius 8 Mpc/h. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.86.
-    :param primoridal_index (float): This is the index of the mass power
-    spectrum before modification by the transfer function. A value of 1
-    corresponds to the scale-free primordial spectrum. This should match
-    the value used to make the initial conditions using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=1.0.
-    :param this_redshift (float): The current redshift. Default=None.
-    :param log_mass_min (float): The log10 of the mass of the minimum of the
-    halo mass range. Default=None.
-    :param log_mass_max (float): The log10 of the mass of the maximum of the
-    halo mass range. Default=None.
-    :param num_sigma_bins (float): The number of bins (points) to use for
-    the calculations and generated fit. Default=360.
-    :param fitting_function (int): Which fitting function to use.
-    1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-    5 = Tinker
-    Default=4.
-    :param mass_column (int): The column of halo_file that contains the
-    masses of the haloes. Default=4.
-    """
-    def __init__(self, pf, halo_file=None, omega_matter0=None, omega_lambda0=None,
-    omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
-    this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
-    fitting_function=4, mass_column=5):
+    def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=False, 
+    omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, 
+    sigma8input=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
+    log_mass_max=None, num_sigma_bins=360, fitting_function=4):
         ParallelAnalysisInterface.__init__(self)
-        self.pf = pf
-        self.halo_file = halo_file
+        self.simulation_ds = simulation_ds
+        self.halos_ds = halos_ds
         self.omega_matter0 = omega_matter0
         self.omega_lambda0 = omega_lambda0
         self.omega_baryon0 = omega_baryon0
@@ -88,126 +169,143 @@
         self.log_mass_max = log_mass_max
         self.num_sigma_bins = num_sigma_bins
         self.fitting_function = fitting_function
-        self.mass_column = mass_column
-        
-        # Determine the run mode.
-        if halo_file is None:
-            # We are hand-picking our various cosmological parameters
-            self.mode = 'single'
-        else:
-            # Make the fit using the same cosmological parameters as the dataset.
-            self.mode = 'haloes'
-            self.omega_matter0 = self.pf.omega_matter
-            self.omega_lambda0 = self.pf.omega_lambda
-            self.hubble0 = self.pf.hubble_constant
-            self.this_redshift = self.pf.current_redshift
-            self.read_haloes()
-            if self.log_mass_min == None:
-                self.log_mass_min = math.log10(min(self.haloes))
-            if self.log_mass_max == None:
-                self.log_mass_max = math.log10(max(self.haloes))
+        self.make_analytic = make_analytic
+        self.make_simulated = False
+        """
+        If we want to make an analytic mass function, grab what we can from either the 
+        halo file or the data set, and make sure that the user supplied everything else
+        that is needed.
+        """
+        # If we don't have any datasets, make the analytic function with user values
+        if simulation_ds is None and halos_ds is None:
+            self.make_analytic=True
+            # Set a reasonable mass min and max if none were provided
+            if log_mass_min is None:
+                self.log_mass_min = 5
+            if log_mass_max is None:
+                self.log_mass_max = 15
 
-        # Input error check.
-        if self.mode == 'single':
-            if omega_matter0 == None or omega_lambda0 == None or \
-            hubble0 == None or this_redshift == None or log_mass_min == None or\
-            log_mass_max == None:
-                mylog.error("All of these parameters need to be set:")
-                mylog.error("[omega_matter0, omega_lambda0, \
-                hubble0, this_redshift, log_mass_min, log_mass_max]")
-                mylog.error("[%s,%s,%s,%s,%s,%s]" % (omega_matter0,\
-                omega_lambda0, hubble0, this_redshift,\
-                log_mass_min, log_mass_max))
-                return None
-        
-        # Poke the user to make sure they're doing it right.
-        mylog.info(
+        # If we are given a simulation dataset, make the analytic mass function
+        if simulation_ds is not None:
+            self.make_analytic = True
+
+        # If we're making the analytic function...
+        if self.make_analytic:
+            # Try to set cosmological parameters from the simulation dataset
+            if simulation_ds is not None:
+                self.omega_matter0 = self.simulation_ds.omega_matter
+                self.omega_lambda0 = self.simulation_ds.omega_lambda
+                self.hubble0 = self.simulation_ds.hubble_constant
+                self.this_redshift = self.simulation_ds.current_redshift
+                # Set a reasonable mass min and max if none were provided
+                if log_mass_min is None:
+                    self.log_mass_min = 5
+                if log_mass_max is None:
+                    self.log_mass_max = 15
+            # If we have a halo dataset but not a simulation dataset, use that instead
+            if simulation_ds is None and halos_ds is not None:
+                self.omega_matter0 = self.halos_ds.omega_matter
+                self.omega_lambda0 = self.halos_ds.omega_lambda
+                self.hubble0 = self.halos_ds.hubble_constant
+                self.this_redshift = self.halos_ds.current_redshift
+                # If the user didn't specify mass min and max, set them from the halos
+                if log_mass_min is None:
+                    self.set_mass_from_halos("min_mass")
+                if log_mass_max is None:
+                    self.set_mass_from_halos("max_mass")
+            # Do the calculations.
+            self.sigmaM()
+            self.dndm()
+
         """
-        Please make sure these are the correct values! They are
-        not stored in enzo datasets, so must be entered by hand.
-        sigma8input=%f primordial_index=%f omega_baryon0=%f
-        """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        
-        # Do the calculations.
-        self.sigmaM()
-        self.dndm()
-        
-        if self.mode == 'haloes':
-            self.bin_haloes()
+        If a halo file has been supplied, make a mass function for the simulated halos.
+        """
+        if halos_ds is not None:
+            # Used to check if a simulated halo mass function exists to write out
+            self.make_simulated = True
+            # Calculate the simulated halo mass function
+            self.create_sim_hmf()
 
-    def write_out(self, prefix='HMF', fit=True, haloes=True):
+    """
+    If we're making an analytic fit and have a halo dataset, but don't have log_mass_min 
+    or log_mass_max from the user, set it from the range of halo masses.
+    """
+    def set_mass_from_halos(self, which_limit):
+        data_source = self.halos_ds.all_data()
+        if which_limit == "min_mass":
+            self.log_mass_min = int(np.log10(np.amin(data_source['ParticleMassMsun'])))
+        if which_limit == "max_mass":
+            self.log_mass_max = int(np.log10(np.amax(data_source['ParticleMassMsun'])))+1
+    
+    """
+    Here's where we create the halo mass functions from simulated halos
+    """
+    def create_sim_hmf(self):
+        data_source = self.halos_ds.all_data()
+        # We're going to use indices to count the number of halos above a given mass
+        masses_sim = np.sort(data_source['ParticleMassMsun'])
+        # Determine the size of the simulation volume in comoving Mpc**3
+        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')).prod()
+        n_cumulative_sim = np.arange(len(masses_sim),0,-1)
+        # We don't want repeated halo masses, and the unique indices tell us which values 
+        # correspond to distinct halo masses.
+        self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
+        # Now make this an actual number density of halos as a function of mass.
+        self.n_cumulative_sim = n_cumulative_sim[unique_indices]/sim_volume
+        # masses_sim and n_cumulative_sim are now set, but remember that the log10 quantities
+        # are what is usually plotted for a halo mass function.
+
+    def write_out(self, prefix='HMF', analytic=True, simulated=True):
         """
         Writes out the halo mass functions to file(s) with prefix *prefix*.
         """
-        # First the fit file.
-        if fit:
-            fitname = prefix + '-fit.dat'
-            fp = self.comm.write_on_root(fitname)
-            line = \
-            """#Columns:
+        # First the analytic file, check that analytic fit exists and was requested
+        if analytic:
+            if self.make_analytic:
+                fitname = prefix + '-analytic.dat'
+                fp = self.comm.write_on_root(fitname)
+                line = \
+                """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
-#3. (dn/dM)*dM (differential number density of haloes, per Mpc^3 (NOT h^3/Mpc^3)
+#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
 #4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
-            fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dn_M_z[i], self.nofmz_cum[i])
                 fp.write(line)
-            fp.close()
-        if self.mode == 'haloes' and haloes:
-            haloname = prefix + '-haloes.dat'
-            fp = self.comm.write_on_root(haloname)
-            line = \
-            """#Columns:
+                for i in xrange(self.logmassarray.size - 1):
+                    line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
+                    self.dn_M_z[i], self.nofmz_cum[i])
+                    fp.write(line)
+                fp.close()
+            # If the analytic halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The analytic halo mass function was not created and cannot be written \
+out! Specify its creation with HaloMassFcn(make_analytic=True, other_args) \
+when creating the HaloMassFcn object.")
+        # Write out the simulated mass function if it exists and was requested
+        if simulated:
+            if self.make_simulated:
+                haloname = prefix + '-simulated.dat'
+                fp = self.comm.write_on_root(haloname)
+                line = \
+                """#Columns:
 #1. log10 of mass (Msolar, NOT Msolar/h)
 #2. mass (Msolar/h)
-#3. cumulative number density of haloes (per Mpc^3, NOT h^3/Mpc^3)
+#3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
 """
-            fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dis[i])
                 fp.write(line)
-            fp.close()
-        
-    def read_haloes(self):
-        """
-        Read in the virial masses of the haloes.
-        """
-        mylog.info("Reading halo masses from %s" % self.halo_file)
-        f = open(self.halo_file,'r')
-        line = f.readline()
-        if line == "":
-            self.haloes = np.array([])
-            return
-        while line[0] == '#':
-            line = f.readline()
-        self.haloes = []
-        while line:
-            line = line.split()
-            mass = float(line[self.mass_column])
-            if mass > 0:
-                self.haloes.append(float(line[self.mass_column]))
-            line = f.readline()
-        f.close()
-        self.haloes = np.array(self.haloes)
-
-    def bin_haloes(self):
-        """
-        With the list of virial masses, find the halo mass function.
-        """
-        bins = np.logspace(self.log_mass_min,
-            self.log_mass_max,self.num_sigma_bins)
-        avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = np.histogram(self.haloes,bins)
-        # add right to left
-        for i,b in enumerate(dis):
-            dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
-            if i == (self.num_sigma_bins - 3): break
-
-        self.dis = dis  / (self.pf.domain_width * self.pf.units["mpccm"]).prod()
+                for i in xrange(self.masses_sim.size - 1):
+                    line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
+                    self.masses_sim[i]/self.hubble0,
+                    self.n_cumulative_sim[i])
+                    fp.write(line)
+                fp.close()
+            # If the simulated halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The simulated halo mass function was not created and cannot be written \
+out! Specify its creation by providing a loaded halo dataset with \
+HaloMassFcn(halos_ds=loaded_halo_dataset, other_args) when creating \
+the HaloMassFcn object.")
 
     def sigmaM(self):
         """
@@ -255,8 +353,9 @@
         sigma_normalization = self.sigma8input / sigma8_unnorm;
 
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-                rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
+               .in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()       
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -289,11 +388,12 @@
             # All done!
 
     def dndm(self):
-        
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-            rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
+               .in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()
+
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
         nofmz_cum = 0.0;  # keep track of cumulative number density
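
A minimal standalone sketch of the counting trick used in create_sim_hmf()
above; the halo masses and box volume here are invented for illustration:

  import numpy as np

  # Sorted (invented) halo masses in Msun.
  masses = np.sort(np.array([1e10, 3e10, 3e10, 7e11, 2e12]))
  # Entry i counts the halos with mass >= masses[i]: here [5, 4, 3, 2, 1].
  n_cumulative = np.arange(len(masses), 0, -1)
  # Collapse repeated masses; for sorted input, the first occurrence of each
  # unique value carries the correct (largest) cumulative count.
  unique_masses, first_idx = np.unique(masses, return_index=True)
  n_cumulative = n_cumulative[first_idx]
  # Divide by an assumed comoving volume to get a number density.
  sim_volume = 100.0**3  # (comoving Mpc)**3, made up
  n_density = n_cumulative / sim_volume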

diff -r 2ef55a7a3a8b8ba66fd8f68c31b744b91ff361ee -r 96fa9b9a6595fd5cd7e1f40be8757906510d9ef9 yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -79,6 +79,8 @@
 # flux
 jansky_cgs = 1.0e-23
 # Cosmological constants
+# Calculated with H = 100 km/s/Mpc, value given in units of h^2 g cm^-3
+# Multiply by h^2 to get the critical density in units of g cm^-3
 rho_crit_g_cm3_h2 = 1.8788e-29
 
 # Misc. Approximations
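
For reference, the constant above feeds the rho0 calculation in this
changeset; a quick sketch of the conversion (the Omega_m value is an
assumption, and the result keeps the h^2 scaling):

  from yt.units.yt_array import YTQuantity

  rho_crit_g_cm3_h2 = 1.8788e-29   # h^2 g/cm^3, as defined above
  omega_matter0 = 0.2726           # assumed cosmology

  # Only the units change; the h^2 factor rides along unchanged.
  rho0 = YTQuantity(omega_matter0 * rho_crit_g_cm3_h2,
                    'g/cm**3').in_units('Msun/Mpc**3')
  # ~7.57e10, to be read as h^2 Msolar/Mpc^3
  rho0 = float(rho0)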


https://bitbucket.org/yt_analysis/yt/commits/129325961910/
Changeset:   129325961910
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-03 23:53:16
Summary:     removed the radius array that is never used and corrected the units in the docstrings that were wrong.
Affected #:  1 file

diff -r 96fa9b9a6595fd5cd7e1f40be8757906510d9ef9 -r 129325961910bdb77f5eb4678b6b0e9869560a9f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -41,15 +41,18 @@
     function information.
 
     masses_sim : Array 
-        Halo masses from simulated halos.
+        Halo masses from simulated halos. Units: M_solar.
     n_cumulative_sim : Array
         Number density of halos with mass greater than the corresponding 
-        mass in masses_sim (simulated).
-    massarray : Array
-        Masses used for the generation of the analytic mass function.
-    nofmz_cum : Array
+        mass in masses_sim (simulated). Units: comoving (Mpc/h)^-3
+    masses_analytic : Array
+        Masses used for the generation of the analytic mass function, Units:
+        M_solar.
+    n_cumulative_analytic : Array
         Number density of halos with mass greater than the corresponding 
-        mass in massarray (analytic).
+        mass in masses_analytic (analytic). Units: comoving (Mpc/h)^-3
+    dndM_dM_analytic : Array
+        Differential number density of halos, (dn/dM)*dM (analytic).
 
     The HaloMassFcn object also has a convenience function write_out() that
     will write out the data to disk.
@@ -103,13 +106,13 @@
         The log10 of the mass of the minimum of the halo mass range. This is
         set automatically by the range of halo masses if a simulated halo 
         dataset is provided. If a halo dataset is not provided and no value
-        is specified, it will be set to 5.
+        is specified, it will be set to 5. Units: M_solar
         Default : None.
     log_mass_max : float 
         The log10 of the mass of the maximum of the halo mass range. This is
         set automatically by the range of halo masses if a simulated halo 
         dataset is provided. If a halo dataset is not provided and no value
-        is specified, it will be set to 15.
+        is specified, it will be set to 15. Units: M_solar
         Default : None.
     num_sigma_bins : float
         The number of bins (points) to use for the calculation of the 
@@ -130,7 +133,7 @@
     >>> halos_ds = load("rockstar_halos/halo_0.0.bin")
     >>> hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
     >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
-    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
 
     This creates only the analytic halo mass function for a simulation
@@ -139,7 +142,7 @@
 
     >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
     >>> hmf = HaloMassFcn(ds=ds)
-    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
     
     This creates the analytic mass function for an arbitrary set of 
@@ -148,7 +151,7 @@
     >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
                           omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
                           log_mass_min=5, log_mass_max=9)
-    >>> plt.loglog(hmf.massarray, hmf.nofmz_cum)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
     """
     def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=False, 
@@ -216,6 +219,8 @@
             # Do the calculations.
             self.sigmaM()
             self.dndm()
+            # Return the mass array in M_solar rather than M_solar/h
+            self.masses_analytic *= self.hubble0
 
         """
         If a halo file has been supplied, make a mass function for the simulated halos.
@@ -245,7 +250,7 @@
         # We're going to use indices to count the number of halos above a given mass
         masses_sim = np.sort(data_source['ParticleMassMsun'])
         # Determine the size of the simulation volume in comoving Mpc**3
-        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')).prod()
+        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')/self.hubble0).prod()
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)
         # We don't want repeated halo masses, and the unique indices tell us which values 
         # correspond to distinct halo masses.
@@ -266,15 +271,14 @@
                 fp = self.comm.write_on_root(fitname)
                 line = \
                 """#Columns:
-#1. log10 of mass (Msolar, NOT Msolar/h)
-#2. mass (Msolar/h)
-#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-#4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
+#1. mass (M_solar)
+#2. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
+#3. cumulative number density of halos (comoving (Mpc/h)^3)
 """
                 fp.write(line)
-                for i in xrange(self.logmassarray.size - 1):
-                    line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                    self.dn_M_z[i], self.nofmz_cum[i])
+                for i in xrange(self.masses_analytic.size - 1):
+                    line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
+                    self.dndM_dM_analytic[i], self.n_cumulative_analytic[i])
                     fp.write(line)
                 fp.close()
             # If the analytic halo mass function wasn't created, warn the user
@@ -289,14 +293,14 @@
                 fp = self.comm.write_on_root(haloname)
                 line = \
                 """#Columns:
-#1. log10 of mass (Msolar, NOT Msolar/h)
-#2. mass (Msolar/h)
-#3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
+#1. log10 of mass (M_solar)
+#2. mass (M_solar)
+#3. cumulative number density of halos (comoving (Mpc/h)^3)
 """
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
                     line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
-                    self.masses_sim[i]/self.hubble0,
+                    self.masses_sim[i],
                     self.n_cumulative_sim[i])
                     fp.write(line)
                 fp.close()
@@ -321,10 +325,8 @@
         
          Outputs: two arrays containing the following information:
 
-         1) log mass (Msolar)
-         2) mass (Msolar/h)
-         3) Radius (comoving Mpc/h)
-         4) sigma (normalized) using Msun/h as the input
+         1) mass (Msolar/h)
+         2) sigma (normalized) using Msun/h as the input
          
          The arrays output are used later.
         """
@@ -337,13 +339,9 @@
             mylog.error("You should probably fix your cosmology parameters!")
 
         # output arrays
-        # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
-        # 2) mass (Msolar/h)
-        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
-        # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
-        # 4) sigma(M, z=0, where mass is in Msun/h)
+        # 1) mass (M_solar/h), changed to M_solar at output
+        self.masses_analytic = np.empty(self.num_sigma_bins, dtype='float64')
+        # 2) sigma(M, z=0, where mass is in Msun/h)
         self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
@@ -379,9 +377,7 @@
     
             R = thisradius; # h^-1 Mpc (comoving)
     
-            self.Rarray[i] = thisradius;  # h^-1 Mpc (comoving)
-            self.logmassarray[i] = thislogmass;  # Msun (NOT Msun/h)
-            self.massarray[i] = thismass;  # Msun/h
+            self.masses_analytic[i] = thismass;  # Msun/h
     
             # get normalized sigma(R)
             self.sigmaarray[i] = math.sqrt(self.sigma_squared_of_R(R)) * sigma_normalization;
@@ -396,16 +392,16 @@
 
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
-        nofmz_cum = 0.0;  # keep track of cumulative number density
+        n_cumulative_analytic = 0.0;  # keep track of cumulative number density
         
         # Loop over masses, going BACKWARD, and calculate dn/dm as well as the 
         # cumulative mass function.
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
+        self.dndM_dM_analytic = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
+        self.n_cumulative_analytic = np.zeros(self.num_sigma_bins, dtype='float64')
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -413,25 +409,25 @@
             thissigma = self.sigmaof_M_z(i, self.this_redshift);
             nextsigma = self.sigmaof_M_z(i+1, self.this_redshift);
             
-            # calc dsigmadm - has units of h (since massarray has units of h^-1)
-            dsigmadm = (nextsigma-thissigma) / (self.massarray[i+1] - self.massarray[i]);
+            # calc dsigmadm - has units of h (since masses_analytic has units of h^-1)
+            dsigmadm = (nextsigma-thissigma) / (self.masses_analytic[i+1] - self.masses_analytic[i]);
 
             # calculate dn(M,z) (dn/dM * dM)
             # this has units of h^3 since rho0 has units of h^2, dsigmadm
-            # has units of h, and massarray has units of h^-1
-            dn_M_z = -1.0 / thissigma * dsigmadm * rho0 / self.massarray[i] * \
-            self.multiplicityfunction(thissigma)*(self.massarray[i+1] - self.massarray[i]);
+            # has units of h, and masses_analytic has units of h^-1
+            dndM_dM_analytic = -1.0 / thissigma * dsigmadm * rho0 / self.masses_analytic[i] * \
+            self.multiplicityfunction(thissigma)*(self.masses_analytic[i+1] - self.masses_analytic[i]);
 
             # scale by h^3 to get rid of all factors of h
-            dn_M_z *= math.pow(self.hubble0, 3.0);
+            dndM_dM_analytic *= math.pow(self.hubble0, 3.0);
             
             # keep track of cumulative number density
-            if dn_M_z > 1.0e-20:
-                nofmz_cum += dn_M_z;
+            if dndM_dM_analytic > 1.0e-20:
+                n_cumulative_analytic += dndM_dM_analytic;
             
             # Store this.
-            self.nofmz_cum[i] = nofmz_cum
-            self.dn_M_z[i] = dn_M_z
+            self.n_cumulative_analytic[i] = n_cumulative_analytic
+            self.dndM_dM_analytic[i] = dndM_dM_analytic
         
 
     def sigma_squared_of_R(self, R):
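
The backward loop in dndm() above amounts to a reversed cumulative sum; a
compact sketch with made-up bin values (the 1e-20 floor in the real loop is
omitted):

  import numpy as np

  # Per-bin (dn/dM)*dM contributions, ordered from low to high mass.
  dndM_dM = np.array([5.0e-2, 1.0e-3, 2.0e-5])
  # Summing from the high-mass end gives n(>M) for each bin, matching the
  # loop that fills n_cumulative_analytic.
  n_cumulative = np.cumsum(dndM_dM[::-1])[::-1]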


https://bitbucket.org/yt_analysis/yt/commits/025ee100a435/
Changeset:   025ee100a435
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-04 20:34:06
Summary:     make_analytic is now True by default, and the narrative docs have been updated to reflect all the changes in halo_mass_function.py
Affected #:  2 files

diff -r 129325961910bdb77f5eb4678b6b0e9869560a9f -r 025ee100a4355a4f7f1d824427906b633b1504ba doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -14,41 +14,24 @@
 General Overview
 ----------------
 
-In order to run this extension on a dataset, the halos need to be located
-(using HOP, FOF or Parallel HOP, see :ref:`halo_finding`),
-and their virial masses determined using the
-HaloProfiler (see :ref:`halo_profiling`).
-Please see the step-by-step how-to which puts these steps together
-(:ref:`hmf_howto`).
-If an optional analytical fit is desired, the correct initial
-cosmological parameters will need to be input as well. These initial parameters
-are not stored in an Enzo dataset, so they must be set by hand.
-An analytical fit can be found without referencing a particular dataset or
-set of halos, but all the cosmological parameters need to be set by hand.
+A halo mass function can be created for the halos identified in a cosmological 
+simulation, as well as analytic fits using any arbitrary set of cosmological
+parameters. In order to create a mass function for simulated halos, the halos must
+first be identified (using HOP, FOF, Parallel HOP, or Rockstar, see 
+:ref:`halo_finding`) and loaded as a halo dataset object. The distribution of
+halo masses will then be found, and can be compared to the analytic prediction
+at the same redshift and using the same cosmological parameters as were used
+in the simulation. Care should be taken in this regard, as the analytic fit
+requires the specification of cosmological parameters that are not necessarily 
+stored in the halo or simulation datasets, and must be specified by the user.
+Efforts have been made to set reasonable defaults for these parameters, but 
+setting them to identically match those used in the simulation will produce a
+much better comparison.
 
-Basic Halo Mass Function Creation
----------------------------------
-
-Creating the halo mass function of simulated halos requires only loading a halo
-dataset and passing this to HaloMassFnc().
-
-.. code-block:: python
-  from yt.mods import *
-  from yt.analysis_modules.halo_mass_function.api import *
-  halos_ds = load("rockstar_halos/halos_0.0.bin")
-  hmf = HaloMassFcn(halos_ds=halos_ds)
-
-If an analytic fit is also desired, additionally set `make_analytic=True', 
-changing the last line above to the following:
-
-.. code-block:: python
-  hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
-
-`hmf' is a HaloMassFnc object off which arrays holding the simulated halo masses,
-cumulative halo density
-
-
-
+Analytic halo mass functions can also be created without a halo dataset by 
+providing either a simulation dataset or specifying cosmological parameters by
+hand. yt includes 5 analytic fits for the halo mass function which can be
+selected.
 
 
 Analytical Fits
@@ -70,152 +53,175 @@
 The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which
 appears to fit HOP threshold=80.0 fairly well.
 
-Analyze Simulated halos
-------------------------
 
-To create the halo mass function of halos found in a simulation, only the 
-loaded halo dataset needs to be specified.
+Basic Halo Mass Function Creation
+---------------------------------
 
+The simplest way to create a halo mass function object is to pass it no
+arguments and let it use the default cosmological parameters.
+
+.. code-block:: python
+
+  from yt.analysis_modules.halo_mass_function.api import *
+
+  hmf = HaloMassFcn()
+
+This will create a HaloMassFcn object with arrays describing the analytic mass
+function attached to it. Creating the halo mass function for a set
+of simulated halos requires only the loaded halo dataset to be passed as an 
+argument. This also creates the analytic mass function using all parameters that 
+can be extracted from the halo dataset, at the same redshift, spanning a similar
+range of halo masses.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+
+  my_halos = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=my_halos)
+
+A simulation dataset can be passed along with additional cosmological parameters 
+to create an analytic mass function.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+
+  my_ds = load("RD0027/RedshiftOutput0027")
+  hmf = HaloMassFcn(ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+                    sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
+
+The analytic mass function can be created for a set of arbitrary cosmological 
+parameters without any dataset being passed as an argument.
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+
+  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+                    omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
+                    log_mass_min=5, log_mass_max=9, fitting_function=5)
+
+
+
+Keyword Arguments
+-----------------
+
+  * **simulation_ds (*Simulation dataset object*)
+    The loaded simulation dataset, used to set cosmological parameters.
+    Default : None.
+
+  * **halos_ds (*Halo dataset object*)
+    The halos from a simulation to be used for creation of the 
+    halo mass function in the simulation.
+    Default : None.
+
+  * **make_analytic (*bool*)
+    Whether or not to calculate the analytic mass function to go with 
+    the simulated halo mass function.  Automatically set to true if a 
+    simulation dataset is provided.
+    Default : True.
+
+  * **omega_matter0 (*float*)
+    The fraction of the universe made up of matter (dark and baryonic). 
+    Default : 0.2726.
+
+  * **omega_lambda0 (*float*)
+    The fraction of the universe made up of dark energy. 
+    Default : 0.7274.
+
+  * **omega_baryon0  (*float*)
+    The fraction of the universe made up of baryonic matter. This is not 
+    always stored in the datset and should be checked by hand.
+    Default : 0.0456.
+
+  * **hubble0 (*float*)
+    The expansion rate of the universe in units of 100 km/s/Mpc. 
+    Default : 0.704.
+
+  * **sigma8 (*float*)
+    The amplitude of the linear power spectrum at z=0 as specified by 
+    the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
+    8 Mpc/h. This is not always stored in the dataset and should be 
+    checked by hand.
+    Default : 0.86.
+
+  * **primordial_index (*float*)
+    This is the index of the mass power spectrum before modification by 
+    the transfer function. A value of 1 corresponds to the scale-free 
+    primordial spectrum. This is not always stored in the dataset and 
+    should be checked by hand.
+    Default : 1.0.
+
+  * **this_redshift (*float*)
+    The current redshift. 
+    Default : 0.
+
+  * **log_mass_min (*float*)
+    The log10 of the mass of the minimum of the halo mass range. This is
+    set automatically by the range of halo masses if a simulated halo 
+    dataset is provided. If a halo dataset is not provided and no value
+    is specified, it will be set to 5. Units: M_solar
+    Default : None.
+
+  * **log_mass_max (*float*)
+    The log10 of the mass of the maximum of the halo mass range. This is
+    set automatically by the range of halo masses if a simulated halo 
+    dataset is provided. If a halo dataset is not provided and no value
+    is specified, it will be set to 15. Units: M_solar
+    Default : None.
+
+  * **num_sigma_bins (*float*)
+    The number of bins (points) to use for the calculation of the 
+    analytic mass function. 
+    Default : 360.
+
+  * **fitting_function (*int*)
+    Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+    3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
+    Default : 4.
+
+
+Outputs
+-------
+A HaloMassFcn object has several arrays hanging off of it, containing:
+  * **masses_sim: Halo masses from simulated halos. Units: M_solar
+
+  * **n_cumulative_sim: Number density of halos with mass greater than the 
+    corresponding mass in masses_sim. Units: comoving (Mpc/h)^-3
+
+  * **masses_analytic: Masses used for the generation of the analytic mass 
+    function. Units: M_solar
+
+  * **n_cumulative_analytic: Number density of halos with mass greater than 
+    the corresponding mass in masses_analytic. Units: comoving (Mpc/h)^-3
+
+  * **dndM_dM_analytic: Differential number density of halos, (dn/dM)*dM.
+
+After the mass function has been created for both simulated halos and the
+corresponding analytic fits, they can be plotted through something along the
+lines of:
+
 .. code-block:: python
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  halos_ds = load("rockstar_halos/halos_0.0.bin")
-  hmf = HaloMassFcn(halos_ds=halos_ds)
+  import matplotlib.pyplot as plt
 
-This will calculate the cumulative halo mass function for the halo dataset and
-create ``hmf'', a HaloMassFcn object with the arrays ``masses_sim'' and 
-``n_cumulative_sim'' hanging off of it. These arrays hold the halo masses in 
-units of solar mass and the cumulative number density of halos above that mass 
-in comoving Mpc^3, respectively.  
+  my_halos = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=my_halos)
+
+  plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+  plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
 
 Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
-halo mass function to a text file. 
+halo mass function to a text file, continuing from the example above:
+
+.. code-block:: python
+
+  hmf.write_out(prefix='hmf', analytic=True, simulated=True)
 
+This writes the file `hmf-analytic.dat' with columns 
+  * **mass (M_solar)
+  * **(dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
+  * **cumulative number density of halos (comoving (Mpc/h)^3)
 
-
-By default, both the mass function of the
-simulated halos (``simulated``) and analytic fit (``analytic``) are written to 
-text files, but they can be turned on or off explicitly. ``prefix`` sets the name used for the file(s). The halos file
-is named ``prefix-halos.dat``, and the fit file ``prefix-fit.dat``.
-Continued from above, invoking this command:
-
-.. code-block:: python
-
-  hmf.write_out(prefix='hmf', fit=False, halos=True)
-
-will save the halos data to a file named ``hmf-halos.dat``. The contents
-of the ``-halos.dat`` file is three columns:
-
-  1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
-  2. mass (Msolar/h) for this bin.
-  3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
-
-Analytical Halo Mass Function Fit
----------------------------------
-
-To create an analytic mass function, several additional parameters which are 
-not necessarily attached to the dataset will need to be provided. If a halo 
-or simulation dataset is provided, the values that can be extracted directly 
-from it will be used. The following parameters will need to be set:
-
-`make_analytic=True'
-
-:math:`\Omega_{m}', `omega_matter0', Default=0.2726
-
-:math:`\Omega_{\Lambda}', `omega_lambda0', Default=0.7274
-
-:math:`\Omega_{b}', `omega_baryon0', Default=0.0456
-
-:math:`h', `hubble0', Default=0.704
-
-:math:`\sigma_8', `sigma8input', Default=0.86
-
-primordial index, `primordial_index', Default=1.0
-
-redshift, `this_redshift', Default=None
-
-log of the minimum halo mass, :math:`log_{10}M_{min}', `log_mass_min', Default=None
-
-log of the maximum halo mass, :math:`log_{10}M_{max}', `log_mass_max', Default=None
-
-Providing a simulation or halo dataset will generally set `omega_matter0',
-`omega_lambda0', `hubble0', and `this_redshift'. If `log_mass_min' or 
-`log_mass_max' are not specified but a halo dataset has been provided, the 
-range of halo masses will be used to set these parameters.
-
-
-
-
-
-When an analytical fit is desired, in nearly all cases several cosmological
-parameters will need to be specified by hand. These parameters are not
-stored with Enzo datasets. In the case where both the halos and an analytical
-fit are desired, the analysis is instantiated as below.
-``sigma8input``, ``primordial_index`` and ``omega_baryon0`` should be set to
-the same values as
-``PowerSpectrumSigma8``, ``PowerSpectrumPrimordialIndex`` and
-``CosmologyOmegaBaryonNow`` from the
-`inits <http://lca.ucsd.edu/projects/enzo/wiki/UserGuide/RunningInits>`_
-parameter file used to set up the simulation.
-``fitting_function`` is set to values 1 through 4 from the list of available
-fits above.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_mass_function.api import *
-  pf = load("data0030")
-  hmf = HaloMassFcn(pf, halo_file="FilteredQuantities.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4)
-  hmf.write_out(prefix='hmf')
-
-Both the ``-halos.dat`` and ``-fit.dat`` files are written to disk.
-The contents of the ``-fit.dat`` file is four columns:
-
-  1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
-  2. mass (Msolar/h) for this bin.
-  3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3) in this bin.
-  4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
-
-Below is an example of the output for both the halos and the (Warren)
-analytical fit, for three datasets. The black lines are the calculated
-halo mass functions, and the blue lines the analytical fit set by initial
-conditions. This simulation shows typical behavior, in that there are too
-few small halos compared to the fit due to lack of mass and gravity resolution
-for small halos. But at higher mass ranges, the simulated halos are quite close
-to the analytical fit.
-
-.. image:: _images/halo_mass_function.png
-   :width: 350
-   :height: 400
-
-The analytical fit can be found without referencing a particular dataset. In this
-case, all the various cosmological parameters need to be specified by hand.
-``omega_matter0`` is the fraction of universe that is made up of matter
-(baryons and dark matter). ``omega_lambda0`` is the fractional proportion due
-to dark energy. In a flat universe, ``omega_matter0`` + ``omega_lambda0`` = 1.
-``this_redshift`` is the redshift for which you wish to generate a fit.
-``log_mass_min`` and ``log_mass_max`` are the logarithmic ends of the mass range for which
-you wish to calculate the fit.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_mass_function.api import *
-  hmf = HaloMassFcn(None, omega_matter0=0.3, omega_lambda0=0.7,
-  omega_baryon0=0.06, hubble0=.7, this_redshift=0., log_mass_min=8.,
-  log_mass_max=13., sigma8input=0.9, primordial_index=1.,
-  fitting_function=1)
-  hmf.write_out(prefix="hmf-press-schechter", fit=True, halos=False)
-
-It is possible to access the output of the halo mass function without saving
-to disk. The content is stored in arrays hanging off the ``HaloMassFcn``
-object:
-
-  * ``hmf.logmassarray`` for log10 of mass bin.
-  * ``hmf.massarray`` for mass bin.
-  * ``hmf.dn_M_z`` for (dn/dM)*dM (analytical fit).
-  * ``hmf.nofmz_cum`` for cumulative number density of halos (analytical fit).
-  * ``hmf.dis`` for cumulative number density of halos (from provided halo
-    halo information).
+and the file `hmf-simulated.dat' with columns
+  * **mass (M_solar)
+  * **log10 of mass (M_solar)
+  * **cumulative number density of halos (comoving (Mpc/h)^3)
\ No newline at end of file

diff -r 129325961910bdb77f5eb4678b6b0e9869560a9f -r 025ee100a4355a4f7f1d824427906b633b1504ba yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -73,7 +73,7 @@
         Whether or not to calculate the analytic mass function to go with 
         the simulated halo mass function.  Automatically set to true if a 
         simulation dataset is provided.
-        Default : False.
+        Default : True.
     omega_matter0 : float
         The fraction of the universe made up of matter (dark and baryonic). 
         Default : 0.2726.
@@ -87,7 +87,7 @@
     hubble0 : float 
         The expansion rate of the universe in units of 100 km/s/Mpc. 
         Default : 0.704.
-    sigma8input : float 
+    sigma8 : float 
         The amplitude of the linear power spectrum at z=0 as specified by 
         the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
         8 Mpc/h. This is not always stored in the dataset and should be 
@@ -131,7 +131,7 @@
     using as many cosmological parameters as can be pulled from the dataset.
 
     >>> halos_ds = load("rockstar_halos/halo_0.0.bin")
-    >>> hmf = HaloMassFcn(halos_ds=halos_ds, make_analytic=True)
+    >>> hmf = HaloMassFcn(halos_ds=halos_ds)
     >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
     >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
@@ -146,7 +146,7 @@
     >>> plt.savefig("mass_function.png")
     
     This creates the analytic mass function for an arbitrary set of 
-    cosmological parameters, without either a simulation or halo dataset.
+    cosmological parameters, with neither a simulation nor halo dataset.
 
     >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
                           omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
@@ -154,9 +154,9 @@
     >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
     """
-    def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=False, 
+    def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=True, 
     omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, 
-    sigma8input=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
+    sigma8=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
     log_mass_max=None, num_sigma_bins=360, fitting_function=4):
         ParallelAnalysisInterface.__init__(self)
         self.simulation_ds = simulation_ds
@@ -165,7 +165,7 @@
         self.omega_lambda0 = omega_lambda0
         self.omega_baryon0 = omega_baryon0
         self.hubble0 = hubble0
-        self.sigma8input = sigma8input
+        self.sigma8 = sigma8
         self.primordial_index = primordial_index
         self.this_redshift = this_redshift
         self.log_mass_min = log_mass_min
@@ -181,17 +181,12 @@
         """
         # If we don't have any datasets, make the analytic function with user values
         if simulation_ds is None and halos_ds is None:
-            self.make_analytic=True
             # Set a reasonable mass min and max if none were provided
             if log_mass_min is None:
                 self.log_mass_min = 5
             if log_mass_max is None:
                 self.log_mass_max = 15
 
-        # If we are given a simulation dataset, make the analytic mass function
-        if simulation_ds is not None:
-            self.make_analytic = True
-
         # If we're making the analytic function...
         if self.make_analytic == True:
             # Try to set cosmological parameters from the simulation dataset
@@ -293,14 +288,14 @@
                 fp = self.comm.write_on_root(haloname)
                 line = \
                 """#Columns:
-#1. log10 of mass (M_solar)
-#2. mass (M_solar)
+#1. mass (M_solar)
+#2. log10 of mass (M_solar)
 #3. cumulative number density of halos (comoving (Mpc/h)^3)
 """
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
-                    line = "%e\t%e\t%e\n" % (np.log10(self.masses_sim[i]), 
-                    self.masses_sim[i],
+                    line = "%e\t%e\t%e\n" % (self.masses_sim[i], 
+                    np.log10(self.masses_sim[i])
                     self.n_cumulative_sim[i])
                     fp.write(line)
                 fp.close()
@@ -348,7 +343,7 @@
         R = 8.0;  # in units of Mpc/h (comoving)
 
         sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R));
-        sigma_normalization = self.sigma8input / sigma8_unnorm;
+        sigma_normalization = self.sigma8 / sigma8_unnorm;
 
         # rho0 in units of h^2 Msolar/Mpc^3
         rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
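
Since write_out() now produces a three-column `hmf-analytic.dat', reading it
back for plotting is straightforward. A sketch; the file name and column
order follow the header above, everything else is assumed:

  import numpy as np
  import matplotlib.pyplot as plt

  # Columns: mass (M_solar), (dn/dM)*dM, cumulative number density.
  mass, dndM_dM, n_cum = np.loadtxt('hmf-analytic.dat', unpack=True)
  plt.loglog(mass, n_cum)
  plt.xlabel('M (Msun)')
  plt.ylabel('n(>M)')
  plt.savefig('hmf_from_file.png')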


https://bitbucket.org/yt_analysis/yt/commits/e033db6e9bfe/
Changeset:   e033db6e9bfe
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-11 18:32:13
Summary:     updated narrative docs
Affected #:  1 file

diff -r 025ee100a4355a4f7f1d824427906b633b1504ba -r e033db6e9bfe70dd1bfa8d9fb298f3198fa3b287 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -183,6 +183,7 @@
 
 Outputs
 -------
+
 A HaloMassFcn object has several arrays hanging off of it, containing:
   * **masses_sim: Halo masses from simulated halos. Units: M_solar
 


https://bitbucket.org/yt_analysis/yt/commits/3ef3df8d6fbe/
Changeset:   3ef3df8d6fbe
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-11 21:25:49
Summary:     Added some possibly functional halo mass function tests.
Affected #:  2 files

diff -r e033db6e9bfe70dd1bfa8d9fb298f3198fa3b287 -r 3ef3df8d6fbedc2d0882766a2d415e8c5932ecb1 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -40,3 +40,14 @@
     for test in big_patch_amr(g30, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+
+hds0 = "rockstar_halos/halos_0.0.bin"
+hds1 = "rockstar_halos/halos_0.1.bin"
+@requires_pf(hds0)
+@requires_pf(hds1)
+def test_halo_mass_function():
+	hds = data_dir_load(hds0)
+	yield assert_equal, str(hds), "halos_0.0.bin"
+	for test in hfm_sim_and_analytic(hds0)
+		test_halo_mass_function.__name__ = test.description
+		yield test
\ No newline at end of file

diff -r e033db6e9bfe70dd1bfa8d9fb298f3198fa3b287 -r 3ef3df8d6fbedc2d0882766a2d415e8c5932ecb1 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -586,6 +586,32 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newc == oldc)
 
+class HaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "HaloMassFunction"
+    _attrs = ()
+
+    def run(self):
+        result = {}
+        hmf = HaloMassFcn(halos_ds=self.hc)
+        result["masses_sim"] = hmf.masses_sim
+        result["n_cumulative_sim"] = hmf.n_cumulative_sim
+        result["masses_analytic"] = hmf.masses_analytic
+        result["n_cumulative_analytic"] = hmf.n_cumulative_analytic
+        result["dndM_dM_analytic"] = hmf.dndM_dM_analytic
+        return result
+
+    def compare(self, new_result, old_result):
+        for newms, oldms in zip(new_result['masses_sim'], old_result['masses_sim']):
+            assert(newms == oldms)
+        for newncs, oldncs in zip(new_result['n_cumulative_sim'], old_result['n_cumulative_sim']):
+            assert(newncs == oldncs)
+        for newma, oldma in zip(new_result['masses_analytic'], old_result['masses_analytic']):
+            assert(newma == oldma)
+        for newnca, oldnca in zip(new_result['n_cumulative_analytic'], old_result['n_cumulative_analytic']):
+            assert(newnca == oldnca)
+        for newdndmdma, olddndmdma in zip(new_result['dndM_dM_analytic'], old_result['dndM_dM_analytic']):
+            assert(newdndmdma == olddndmdma)
+
 def compare_image_lists(new_result, old_result, decimals):
     fns = ['old.png', 'new.png']
     num_images = len(old_result)
@@ -731,6 +757,11 @@
                         pf_fn, axis, field, weight_field,
                         ds)
 
+def hmf_sim_and_analytic(halos_ds):
+    if not can_run_pf(halos_ds): return
+    yield HaloMassFunctionTest(halos_ds)
+
+
 def create_obj(pf, obj_type):
     # obj_type should be tuple of
     #  ( obj_name, ( args ) )
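
Element-wise equality checks like the ones in compare() above can be brittle
for floating-point arrays; a sketch of a tolerance-based alternative (not
part of the commit):

  import numpy as np
  from numpy.testing import assert_allclose

  def compare_hmf_results(new_result, old_result, rtol=1e-7):
      # Compare each stored array field by field with a relative tolerance.
      for key in ("masses_sim", "n_cumulative_sim", "masses_analytic",
                  "n_cumulative_analytic", "dndM_dM_analytic"):
          assert_allclose(np.asarray(new_result[key]),
                          np.asarray(old_result[key]), rtol=rtol)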


https://bitbucket.org/yt_analysis/yt/commits/689e68fe692b/
Changeset:   689e68fe692b
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-14 18:35:02
Summary:     Fixed typo in write_out(); analytic mass function output arrays are now given yt units in situations where a dataset with cosmological information has been provided. Comoving Mpc still needs to be added as a unit for cases where all cosmological parameters are specified by hand.
Affected #:  1 file

diff -r 3ef3df8d6fbedc2d0882766a2d415e8c5932ecb1 -r 689e68fe692b7fb34ea8f2599a0019913c84a71c yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -112,7 +112,7 @@
         The log10 of the mass of the maximum of the halo mass range. This is
         set automatically by the range of halo masses if a simulated halo 
         dataset is provided. If a halo dataset is not provided and no value
-        is specified, it will be set to 15. Units: M_solar
+        is specified, it will be set to 16. Units: M_solar
         Default : None.
     num_sigma_bins : float
         The number of bins (points) to use for the calculation of the 
@@ -185,7 +185,7 @@
             if log_mass_min is None:
                 self.log_mass_min = 5
             if log_mass_max is None:
-                self.log_mass_max = 15
+                self.log_mass_max = 16
 
         # If we're making the analytic function...
         if self.make_analytic == True:
@@ -199,7 +199,7 @@
                 if log_mass_min is None:
                     self.log_mass_min = 5
                 if log_mass_max is None:
-                    self.log_mass_max = 15
+                    self.log_mass_max = 16
             # If we have a halo dataset but not a simulation dataset, use that instead
             if simulation_ds is None and halos_ds is not None:
                 self.omega_matter0 = self.halos_ds.omega_matter
@@ -216,6 +216,18 @@
             self.dndm()
             # Return the mass array in M_solar rather than M_solar/h
             self.masses_analytic *= self.hubble0
+            # The halo arrays will already have yt units, but the analytic forms do 
+            # not. If a dataset has been provided, use that to give them units.
+            if simulation_ds is not None:
+                self.masses_analytic = simulation_ds.arr(self.masses_analytic, "Msun")
+                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic,
+                                             "Mpccm**(-3)")
+            elif halos_ds is not None:
+                self.masses_analytic = halos_ds.arr(self.masses_analytic, "Msun")
+                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, 
+                                             "Mpccm**(-3)")
+            else:
+                self.masses_analytic = YTArray(self.masses_analytic, "Msun")
 
         """
         If a halo file has been supplied, make a mass function for the simulated halos.
@@ -295,7 +307,7 @@
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
                     line = "%e\t%e\t%e\n" % (self.masses_sim[i], 
-                    np.log10(self.masses_sim[i])
+                    np.log10(self.masses_sim[i]),
                     self.n_cumulative_sim[i])
                     fp.write(line)
                 fp.close()
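
The ds.arr() pattern used above attaches a dataset's unit registry to a plain
array. A sketch, reusing the dataset path from the docstring examples (array
contents invented):

  import numpy as np
  import yt

  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
  # Plain NumPy arrays gain units, including the dataset's comoving units.
  masses = ds.arr(np.logspace(5, 16, 360), "Msun")
  n_cum = ds.arr(np.zeros(360), "Mpccm**(-3)")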


https://bitbucket.org/yt_analysis/yt/commits/dd1727f0301d/
Changeset:   dd1727f0301d
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-14 19:27:13
Summary:     A new unit registry is created for the halo mass function if there isn't one available from a user-provided dataset, and analytic mass function arrays are returned with the correct associated units
Affected #:  1 file

diff -r 689e68fe692b7fb34ea8f2599a0019913c84a71c -r dd1727f0301dcddd39ef0759b3b5bcaf4e947f43 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -226,8 +226,19 @@
                 self.masses_analytic = halos_ds.arr(self.masses_analytic, "Msun")
                 self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, 
                                              "Mpccm**(-3)")
+            # If we don't have a dataset to get units from, make a new units registry
             else:
-                self.masses_analytic = YTArray(self.masses_analytic, "Msun")
+                from yt.units.dimensions import length
+                hmf.unit_registry = UnitRegistry()
+                for my_unit in ["m", "pc", "AU", "au"]:
+                    new_unit = "%scm" % my_unit
+                    hmf.unit_registry.add(new_unit, 
+                                        hmf.unit_registry.lut[my_unit][0] / 
+                                        (1 + self.this_redshift),
+                                        length, "\\rm{%s}/(1+z)" % my_unit)  
+                self.n_cumulative_analytic = hmf.unit_registry.arr(self.n_cumulative_analytic, 
+                                                                    "Mpccm**(-3")
+                self.masses_analytic = hmf.unit_registry.arr(self.masses_analytic, "Msun")
 
         """
         If a halo file has been supplied, make a mass function for the simulated halos.
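
The registry trick above clones each length unit into a comoving counterpart
scaled by 1/(1+z). A standalone sketch of the idea (the redshift value is
assumed):

  from yt.units.unit_registry import UnitRegistry
  from yt.units.dimensions import length
  from yt.units.yt_array import YTArray

  this_redshift = 2.0  # assumed
  registry = UnitRegistry()
  for my_unit in ["m", "pc", "AU", "au"]:
      new_unit = "%scm" % my_unit
      # Same base unit, shrunk by 1/(1+z): "pccm" is a comoving parsec, etc.
      registry.add(new_unit,
                   registry.lut[my_unit][0] / (1 + this_redshift),
                   length, "\\rm{%s}/(1+z)" % my_unit)
  n = YTArray([1.0e-3], "Mpccm**(-3)", registry=registry)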


https://bitbucket.org/yt_analysis/yt/commits/1f9f4f151748/
Changeset:   1f9f4f151748
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-14 21:28:31
Summary:     Forgot to update the maximum halo mass in the narrative docs.
Affected #:  1 file

diff -r dd1727f0301dcddd39ef0759b3b5bcaf4e947f43 -r 1f9f4f151748e630e9f27b4695d43c14576490a3 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -167,7 +167,7 @@
     The log10 of the mass of the maximum of the halo mass range. This is
     set automatically by the range of halo masses if a simulated halo 
     dataset is provided. If a halo dataset is not provided and no value
-    is specified, it will be set to 15. Units: M_solar
+    is specified, it will be set to 16. Units: M_solar
     Default : None.
 
   * **num_sigma_bins (*float*)


https://bitbucket.org/yt_analysis/yt/commits/794d5b5e6edd/
Changeset:   794d5b5e6edd
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-15 18:46:14
Summary:     Import the halo mass function api before testing.
Affected #:  1 file

diff -r 1f9f4f151748e630e9f27b4695d43c14576490a3 -r 794d5b5e6edd4670c71acdd8c1f2d63e9bd5ee71 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -591,6 +591,7 @@
     _attrs = ()
 
     def run(self):
+        from yt.analysis_modules.halo_mass_function.api import *
         result = {}
         hmf = HaloMassFunction(halos_ds=self.hc)
         result["masses_sim"] = hmf.masses_sim


https://bitbucket.org/yt_analysis/yt/commits/9d6de42ecd29/
Changeset:   9d6de42ecd29
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-15 18:57:54
Summary:     missing colon caught by Nathan.
Affected #:  1 file

diff -r 794d5b5e6edd4670c71acdd8c1f2d63e9bd5ee71 -r 9d6de42ecd29ccadce2256d2c0f5167fc491eb06 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -48,6 +48,6 @@
 def test_halo_mass_function():
 	hds = data_dir_load(hds0)
 	yield assert_equal, str(hds), "halos_0.0.bin"
-	for test in hfm_sim_and_analytic(hds0)
+	for test in hfm_sim_and_analytic(hds0):
 		test_halo_mass_function.__name__ = test.description
 		yield test
\ No newline at end of file


https://bitbucket.org/yt_analysis/yt/commits/930b035187dc/
Changeset:   930b035187dc
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-17 01:02:25
Summary:     Halo mass function units registry now works. yt units are better utilized, and the write_out() outputs and the arrays attached to the HaloMassFcn object now have the same units.
Affected #:  1 file

diff -r 9d6de42ecd29ccadce2256d2c0f5167fc491eb06 -r 930b035187dc065954e9f3d5090e146f3b8a2993 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -215,30 +215,32 @@
             self.sigmaM()
             self.dndm()
             # Return the mass array in M_solar rather than M_solar/h
-            self.masses_analytic *= self.hubble0
+            self.masses_analytic = YTArray(self.masses_analytic/self.hubble0, "Msun")
             # The halo arrays will already have yt units, but the analytic forms do 
-            # not. If a dataset has been provided, use that to give them units.
+            # not. If a dataset has been provided, use that to give them units. At the
+            # same time, convert to comoving (Mpc/h)^-3
             if simulation_ds is not None:
-                self.masses_analytic = simulation_ds.arr(self.masses_analytic, "Msun")
-                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic,
-                                             "Mpccm**(-3)")
+                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic * 
+                                                          self.hubble0**3, 
+                                                          "(Mpccm/h)**(-3)")
             elif halos_ds is not None:
-                self.masses_analytic = halos_ds.arr(self.masses_analytic, "Msun")
-                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, 
-                                             "Mpccm**(-3)")
-            # If we don't have a dataset to get units from, make a new units registry
+                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic * 
+                                                          self.hubble0**3, 
+                                                          "(Mpccm/h)**(-3)")
             else:
+                from yt.units.unit_registry import UnitRegistry
                 from yt.units.dimensions import length
-                hmf.unit_registry = UnitRegistry()
+                hmf_registry = UnitRegistry()
                 for my_unit in ["m", "pc", "AU", "au"]:
                     new_unit = "%scm" % my_unit
-                    hmf.unit_registry.add(new_unit, 
-                                        hmf.unit_registry.lut[my_unit][0] / 
-                                        (1 + self.this_redshift),
-                                        length, "\\rm{%s}/(1+z)" % my_unit)  
-                self.n_cumulative_analytic = hmf.unit_registry.arr(self.n_cumulative_analytic, 
-                                                                    "Mpccm**(-3")
-                self.masses_analytic = hmf.unit_registry.arr(self.masses_analytic, "Msun")
+                    hmf_registry.add(new_unit, 
+                                     hmf_registry.lut[my_unit][0] / 
+                                     (1 + self.this_redshift),
+                                     length, "\\rm{%s}/(1+z)" % my_unit)
+                self.n_cumulative_analytic = YTArray(self.n_cumulative_analytic * 
+                                                     self.hubble0**3, 
+                                                     "(Mpccm/h)**(-3)", 
+                                                     registry=hmf_registry)                          
 
         """
         If a halo file has been supplied, make a mass function for the simulated halos.
@@ -268,7 +270,7 @@
         # We're going to use indices to count the number of halos above a given mass
         masses_sim = np.sort(data_source['ParticleMassMsun'])
         # Determine the size of the simulation volume in comoving Mpc**3
-        sim_volume = (self.halos_ds.domain_width.in_units('Mpccm')/self.hubble0).prod()
+        sim_volume = self.halos_ds.domain_width.in_units('Mpccm/h').prod()
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)
         # We don't want repeated halo masses, and the unique indices tell us which values 
         # correspond to distinct halo masses.
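
With the fix above, the box volume comes out directly in comoving (Mpc/h)**3.
A sketch, reusing the halo dataset path from the examples:

  import yt

  halos_ds = yt.load("rockstar_halos/halos_0.0.bin")
  # domain_width is a YTArray; convert and take the product over the axes.
  sim_volume = halos_ds.domain_width.in_units('Mpccm/h').prod()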


https://bitbucket.org/yt_analysis/yt/commits/15c3b456c95f/
Changeset:   15c3b456c95f
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-17 01:15:59
Summary:     Spruced up the formatting in the narrative docs.
Affected #:  1 file

diff -r 930b035187dc065954e9f3d5090e146f3b8a2993 -r 15c3b456c95fd68ee7270889645995ff44ee413a doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -106,76 +106,76 @@
 Keyword Arguments
 -----------------
 
-  * **simulation_ds (*Simulation dataset object*)
+  * **simulation_ds** (*Simulation dataset object*)
     The loaded simulation dataset, used to set cosmological parameters.
     Default : None.
 
-  * **halos_ds (*Halo dataset object*)
+  * **halos_ds** (*Halo dataset object*)
     The halos from a simulation to be used for creation of the 
     halo mass function in the simulation.
     Default : None.
 
-  * **make_analytic (*bool*)
+  * **make_analytic** (*bool*)
     Whether or not to calculate the analytic mass function to go with 
     the simulated halo mass function.  Automatically set to true if a 
     simulation dataset is provided.
     Default : True.
 
-  * **omega_matter0 (*float*)
+  * **omega_matter0** (*float*)
     The fraction of the universe made up of matter (dark and baryonic). 
     Default : 0.2726.
 
-  * **omega_lambda0 (*float*)
+  * **omega_lambda0** (*float*)
     The fraction of the universe made up of dark energy. 
     Default : 0.7274.
 
-  * **omega_baryon0  (*float*)
+  * **omega_baryon0**  (*float*)
     The fraction of the universe made up of baryonic matter. This is not 
     always stored in the dataset and should be checked by hand.
     Default : 0.0456.
 
-  * **hubble0 (*float*)
+  * **hubble0** (*float*)
     The expansion rate of the universe in units of 100 km/s/Mpc. 
     Default : 0.704.
 
-  * **sigma8 (*float*)
+  * **sigma8** (*float*)
     The amplitude of the linear power spectrum at z=0 as specified by 
     the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
     8 Mpc/h. This is not always stored in the dataset and should be 
     checked by hand.
     Default : 0.86.
 
-  * **primordial_index (*float*)
+  * **primordial_index** (*float*)
     This is the index of the mass power spectrum before modification by 
     the transfer function. A value of 1 corresponds to the scale-free 
     primordial spectrum. This is not always stored in the dataset and 
     should be checked by hand.
     Default : 1.0.
 
-  * **this_redshift (*float*)
+  * **this_redshift** (*float*)
     The current redshift. 
     Default : 0.
 
-  * **log_mass_min (*float*)
+  * **log_mass_min** (*float*)
     The log10 of the mass of the minimum of the halo mass range. This is
     set automatically by the range of halo masses if a simulated halo 
     dataset is provided. If a halo dataset is not provided and no value
     is specified, it will be set to 5. Units: M_solar
     Default : None.
 
-  * **log_mass_max (*float*)
+  * **log_mass_max** (*float*)
     The log10 of the mass of the maximum of the halo mass range. This is
     set automatically by the range of halo masses if a simulated halo 
     dataset is provided. If a halo dataset is not provided and no value
     is specified, it will be set to 16. Units: M_solar
     Default : None.
 
-  * **num_sigma_bins (*float*)
+  * **num_sigma_bins** (*float*)
     The number of bins (points) to use for the calculation of the 
     analytic mass function. 
     Default : 360.
 
-  * **fitting_function (*int*)
+  * **fitting_function** (*int*)
     Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
     3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
     Default : 4.
@@ -185,18 +185,18 @@
 -------
 
 A HaloMassFcn object has several arrays hanging off of it, containing the following:
-  * **masses_sim: Halo masses from simulated halos. Units: M_solar
+  * **masses_sim**: Halo masses from simulated halos. Units: M_solar
 
-  * **n_cumulative_sim: Number density of halos with mass greater than the 
+  * **n_cumulative_sim**: Number density of halos with mass greater than the 
     corresponding mass in masses_sim. Units: comoving (Mpc/h)^-3
 
-  * **masses_analytic: Masses used for the generation of the analytic mass 
+  * **masses_analytic**: Masses used for the generation of the analytic mass 
     function. Units: M_solar
 
-  * **n_cumulative_analytic: Number density of halos with mass greater then 
+  * **n_cumulative_analytic**: Number density of halos with mass greater than
     the corresponding mass in masses_analytic. Units: comoving (Mpc/h)^-3
 
-  * **dndM_dM_analytic: Differential number density of halos, (dn/dM)*dM.
+  * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
 
 After the mass function has been created for both simulated halos and the
 corresponding analytic fits, they can be plotted through something along the
@@ -218,11 +218,11 @@
   hmf.write_out(prefix='hmf', analytic=True, simulated=True)
 
 This writes the file `hmf-analytic.dat' with columns
-  * **mass (M_solar)
-  * **(dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-  * **cumulative number density of halos (comoving (Mpc/h)^3)
+  * **mass** (M_solar)
+  * **(dn/dM)*dM** (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3))
+  * **cumulative number density of halos** (comoving (Mpc/h)^3)
 
 and the file `hmf-simulated.dat' with columns
-  * **mass (M_solar)
-  * **log10 of mass (M_solar)
-  * **cumulative number density of halos (comoving (Mpc/h)^3)
\ No newline at end of file
+  * **mass** (M_solar)
+  * **log10 of mass** (M_solar)
+  * **cumulative number density of halos** (comoving (Mpc/h)^3)
\ No newline at end of file
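
Taken together with the examples above, a minimal end-to-end use of the module might look like the sketch below (assuming the rockstar sample output used elsewhere in these docs, and make_analytic left at its default of True):

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
   import matplotlib.pyplot as plt

   my_halos = load('rockstar_halos/halos_0.0.bin')
   hmf = HaloMassFcn(halos_ds=my_halos)

   # The arrays described above hang directly off the object.
   plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
   plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
   plt.savefig('hmf.png')

   hmf.write_out(prefix='hmf', analytic=True, simulated=True)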


https://bitbucket.org/yt_analysis/yt/commits/9d1c4bf7ad5b/
Changeset:   9d1c4bf7ad5b
Branch:      yt-3.0
User:        bcrosby
Date:        2014-04-17 01:22:36
Summary:     Columns that are written out for the analytic and simulated mass functions are now in the same order.
Affected #:  1 file

diff -r 15c3b456c95fd68ee7270889645995ff44ee413a -r 9d1c4bf7ad5b60c93c31ca1ad4dc89e06fa85801 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -292,13 +292,14 @@
                 line = \
                 """#Columns:
 #1. mass (M_solar)
-#2. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-#3. cumulative number density of halos (comoving (Mpc/h)^3)
+#2. cumulative number density of halos (comoving (Mpc/h)^3)
+#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3))
 """
                 fp.write(line)
                 for i in xrange(self.masses_analytic.size - 1):
                     line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
-                    self.dndM_dM_analytic[i], self.n_cumulative_analytic[i])
+                    self.n_cumulative_analytic[i], 
+                    self.dndM_dM_analytic[i])
                     fp.write(line)
                 fp.close()
             # If the analytic halo mass function wasn't created, warn the user
@@ -314,13 +315,11 @@
                 line = \
                 """#Columns:
 #1. mass (M_solar)
-#2. log10 of mass (M_solar)
-#3. cumulative number density of halos (comoving (Mpc/h)^3)
+#2. cumulative number density of halos (comoving (Mpc/h)^3)
 """
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
-                    line = "%e\t%e\t%e\n" % (self.masses_sim[i], 
-                    np.log10(self.masses_sim[i]),
+                    line = "%e\t%e\n" % (self.masses_sim[i], 
                     self.n_cumulative_sim[i])
                     fp.write(line)
                 fp.close()
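
With the reordering above, both files now share the layout mass first, cumulative number density second, so a reader script can treat them uniformly. A small sketch (np.loadtxt skips the '#' header lines by default):

.. code-block:: python

   import numpy as np

   # hmf-simulated.dat: mass, cumulative number density
   mass_sim, n_cum_sim = np.loadtxt('hmf-simulated.dat', unpack=True)

   # hmf-analytic.dat: mass, cumulative number density, (dn/dM)*dM
   mass_an, n_cum_an, dndM_dM = np.loadtxt('hmf-analytic.dat', unpack=True)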


https://bitbucket.org/yt_analysis/yt/commits/ab0d4e039c6f/
Changeset:   ab0d4e039c6f
Branch:      yt-3.0
User:        bcrosby
Date:        2014-05-12 15:35:35
Summary:     All units are now consistent, cleaned up syntax errors in docs.
Affected #:  2 files

diff -r 9d1c4bf7ad5b60c93c31ca1ad4dc89e06fa85801 -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -61,6 +61,7 @@
 arguments and let it use the default cosmological parameters.
 
 .. code-block:: python
+
   from yt.analysis_modules.halo_mass_function.api import *
 
   hmf = HaloMassFcn()
@@ -73,6 +74,7 @@
 range of halo masses.
 
 .. code-block:: python
+
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
@@ -83,17 +85,19 @@
 to create an analytic mass function.
 
 .. code-block:: python
+
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
   my_ds = load("RD0027/RedshiftOutput0027")
-  hmf = HaloMassFcn(ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
                     sigma8 = 0.8, log_mass_min=5, log_mass_max=9)
 
 The analytic mass function can be created for a set of arbitrary cosmological 
 parameters without any dataset being passed as an argument.
 
 .. code-block:: python
+
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
 
@@ -188,13 +192,13 @@
   * **masses_sim**: Halo masses from simulated halos. Units: M_solar
 
   * **n_cumulative_sim**: Number density of halos with mass greater than the 
-    corresponding mass in masses_sim. Units: comoving (Mpc/h)^-3
+    corresponding mass in masses_sim. Units: comoving Mpc^-3
 
   * **masses_analytic**: Masses used for the generation of the analytic mass 
     function. Units: M_solar
 
   * **n_cumulative_analytic**: Number density of halos with mass greater than
-    the corresponding mass in masses_analytic. Units: comoving (Mpc/h)^-3
+    the corresponding mass in masses_analytic. Units: comoving Mpc^-3
 
   * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM.
 
@@ -202,6 +206,7 @@
 corresponding analytic fits, they can be plotted through something along the
 lines of
 .. code-block:: python
+
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
   import matplotlib.pyplot as plt
@@ -215,6 +220,7 @@
 Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
 halo mass function to a text file. (continued from above)
 .. code-block:: python
+
   hmf.write_out(prefix='hmf', analytic=True, simulated=True)
 
 This writes the file `hmf-analytic.dat' with columns

diff -r 9d1c4bf7ad5b60c93c31ca1ad4dc89e06fa85801 -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -44,13 +44,13 @@
         Halo masses from simulated halos. Units: M_solar.
     n_cumulative_sim : Array
         Number density of halos with mass greater than the corresponding 
-        mass in masses_sim (simulated). Units: comoving (Mpc/h)^-3
+        mass in masses_sim (simulated). Units: comoving Mpc^-3
     masses_analytic : Array
         Masses used for the generation of the analytic mass function, Units:
         M_solar.
     n_cumulative_analytic : Array
         Number density of halos with mass greater than the corresponding
-        mass in masses_analytic (analytic). Units: comoving (Mpc/h)^-3
+        mass in masses_analytic (analytic). Units: comoving Mpc^-3
     dndM_dM_analytic : Array
         Differential number density of halos, (dn/dM)*dM (analytic).
 
@@ -218,15 +218,13 @@
             self.masses_analytic = YTArray(self.masses_analytic/self.hubble0, "Msun")
             # The halo arrays will already have yt units, but the analytic forms do 
             # not. If a dataset has been provided, use that to give them units. At the
-            # same time, convert to comoving (Mpc/h)^-3
+            # same time, convert to comoving (Mpc)^-3
             if simulation_ds is not None:
-                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic * 
-                                                          self.hubble0**3, 
-                                                          "(Mpccm/h)**(-3)")
+                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic, 
+                                                          "(Mpccm)**(-3)")
             elif halos_ds is not None:
-                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic * 
-                                                          self.hubble0**3, 
-                                                          "(Mpccm/h)**(-3)")
+                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, 
+                                                          "(Mpccm)**(-3)")
             else:
                 from yt.units.unit_registry import UnitRegistry
                 from yt.units.dimensions import length
@@ -236,11 +234,11 @@
                     hmf_registry.add(new_unit, 
                                      hmf_registry.lut[my_unit][0] / 
                                      (1 + self.this_redshift),
-                                     length, "\\rm{%s}/(1+z)" % my_unit)
-                self.n_cumulative_analytic = YTArray(self.n_cumulative_analytic * 
-                                                     self.hubble0**3, 
-                                                     "(Mpccm/h)**(-3)", 
-                                                     registry=hmf_registry)                          
+                                     length, "\\rm{%s}/(1+z)" % my_unit)                         
+                self.n_cumulative_analytic = YTArray(self.n_cumulative_analytic, 
+                                                     "(Mpccm)**(-3)", 
+                                                     registry=hmf_registry) 
+
 
         """
         If a halo file has been supplied, make a mass function for the simulated halos.
@@ -270,7 +268,7 @@
         # We're going to use indices to count the number of halos above a given mass
         masses_sim = np.sort(data_source['ParticleMassMsun'])
         # Determine the size of the simulation volume in comoving Mpc**3
-        sim_volume = self.halos_ds.domain_width.in_units('Mpccm/h').prod()
+        sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)
         # We don't want repeated halo masses, and the unique indices tell us which values 
         # correspond to distinct halo masses.
@@ -370,8 +368,8 @@
         sigma_normalization = self.sigma8 / sigma8_unnorm;
 
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
-               .in_units('Msun/Mpc**3')
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2,
+                          'g/cm**3').in_units('Msun/Mpc**3')
         rho0 = rho0.value.item()       
 
         # spacing in mass of our sigma calculation
@@ -405,8 +403,8 @@
     def dndm(self):
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2, 'g/cm**3')\
-               .in_units('Msun/Mpc**3')
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2, 
+                          'g/cm**3').in_units('Msun/Mpc**3')
         rho0 = rho0.value.item()
 
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
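
Both rho0 hunks above make the same correction: rho_crit_g_cm3_h2 is the critical density with the h^2 factored out, so the physical matter density needs an explicit hubble0**2 before the unit conversion. A quick sanity check of the conversion (the constant value here is the standard ~1.88e-29 g/cm^3, assumed for illustration):

.. code-block:: python

   from yt.units.yt_array import YTQuantity

   rho_crit_g_cm3_h2 = 1.8788e-29  # g/cm^3 per h^2 (assumed value)
   omega_matter0 = 0.2726
   hubble0 = 0.704

   rho0 = YTQuantity(omega_matter0 * rho_crit_g_cm3_h2 * hubble0**2,
                     'g/cm**3').in_units('Msun/Mpc**3')
   print rho0  # roughly 4e10 Msun/Mpc**3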


https://bitbucket.org/yt_analysis/yt/commits/33efc5e11a01/
Changeset:   33efc5e11a01
Branch:      yt-3.0
User:        bcrosby
Date:        2014-05-12 17:42:05
Summary:     merged
Affected #:  307 files

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -34,11 +34,11 @@
  * Do not import "*" from anything other than "yt.funcs".
  * Internally, only import from source files directly -- instead of:
 
-   from yt.visualization.api import PlotCollection
+   from yt.visualization.api import ProjectionPlot
 
    do:
 
-   from yt.visualization.plot_collection import PlotCollection
+   from yt.visualization.plot_window import ProjectionPlot
 
  * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -43,7 +43,7 @@
 To indicate the return type of a given object, you can reference it using this
 construction:
 
-    This function returns a :class:`PlotCollection`.
+    This function returns a :class:`ProjectionPlot`.
 
 To reference a function, you can use:
 
@@ -51,4 +51,4 @@
 
 To reference a method, you can use:
 
-    To add a projection, use :meth:`PlotCollection.add_projection`.
+    To add a projection, use :meth:`ProjectionPlot.set_width`.

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -590,7 +590,7 @@
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.8'
+MERCURIAL='mercurial-3.0'
 NOSE='nose-1.3.0'
 NUMPY='numpy-1.7.1'
 PYTHON_HGLIB='python-hglib-1.0'
@@ -619,7 +619,7 @@
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8  mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
+echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
 echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -0,0 +1,410 @@
+{
+ "metadata": {
+  "name": ""
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "heading",
+     "level": 1,
+     "metadata": {},
+     "source": [
+      "Full Halo Analysis"
+     ]
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Creating a Catalog"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.halo_analysis.api import *\n",
+      "import tempfile\n",
+      "import shutil\n",
+      "import os\n",
+      "\n",
+      "# Create temporary directory for storing files\n",
+      "tmpdir = tempfile.mkdtemp()\n",
+      "\n",
+      "# Load the data set with the full simulation information\n",
+      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Load the rockstar data files\n",
+      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Instantiate a catalog using those two paramter files\n",
+      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "# Filter out less massive halos\n",
+      "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Finding Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n",
+      "hc.add_callback(\"sphere\", factor=2.0)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n",
+      "hc.add_callback(\"profile\", x_field=\"radius\",\n",
+      "                y_fields=[(\"gas\", \"overdensity\")],\n",
+      "                weight_field=\"cell_volume\", \n",
+      "                accumulation=False,\n",
+      "                storage=\"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Define a virial radius for the halo.\n",
+      "hc.add_callback(\"virial_quantities\", [\"radius\"], \n",
+      "                profile_storage = \"virial_quantities_profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated the virial radius, we delete the profiles we used to find it."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('delete_attribute','virial_quantities_profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('sphere', radius_field='radius_200', factor=5,\n",
+      "                field_parameters=dict(virial_radius=('quantity', 'radius_200')))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc.add_callback('profile', 'virial_radius', [('gas','temperature')],\n",
+      "                storage='virial_profiles',\n",
+      "                weight_field='cell_mass', \n",
+      "                accumulation=False, output_dir='profiles')\n"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Save the profiles\n",
+      "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc.create()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Reloading HaloCatalogs"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "\n",
+      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      " Just as profiles are saved seperately throught the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "hc_reloaded.add_callback('load_profiles', storage='virial_profiles',\n",
+      "                         output_dir='profiles')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Calling `load` is the equivalent of calling `create` earlier, but defaults to to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": true,
+     "input": [
+      "hc_reloaded.load()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Plotting Radial Profiles"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "halo = hc_reloaded.halo_list[0]\n",
+      "\n",
+      "radius = halo.virial_profiles['virial_radius']\n",
+      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]\n",
+      "\n",
+      "# Remove output files, that are no longer needed\n",
+      "shutil.rmtree(tmpdir)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "import matplotlib.pyplot as plt\n",
+      "\n",
+      "plt.plot(radius, temperature)\n",
+      "\n",
+      "plt.semilogy()\n",
+      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
+      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "\n",
+      "plt.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
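
For readers skimming the digest, the notebook above reduces to a short pipeline; a condensed sketch using the same sample data (all names as in the notebook):

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_analysis.api import *

   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
   halos_pf = load('rockstar_halos/halos_0.0.bin')

   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
   hc.add_filter("quantity_value", "particle_mass", ">", 1e14, "Msun")
   hc.add_callback("sphere", factor=2.0)
   hc.add_callback("profile", x_field="radius",
                   y_fields=[("gas", "overdensity")],
                   weight_field="cell_volume",
                   accumulation=False,
                   storage="virial_quantities_profiles")
   hc.add_callback("virial_quantities", ["radius"],
                   profile_storage="virial_quantities_profiles")
   hc.create()  # nothing actually runs until this call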

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -0,0 +1,237 @@
+{
+ "metadata": {
+  "name": "",
+  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+ },
+ "nbformat": 3,
+ "nbformat_minor": 0,
+ "worksheets": [
+  {
+   "cells": [
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Detailed spectra of astrophysical objects sometimes allow for determinations of how much of the gas is moving with a certain velocity along the line of sight, thanks to Doppler shifting of spectral lines. This enables \"data cubes\" to be created in RA, Dec, and line-of-sight velocity space. In yt, we can use the `PPVCube` analysis module to project fields along a given line of sight traveling at different line-of-sight velocities, to \"mock-up\" what would be seen in observations."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To demonstrate this functionality, we'll create a simple unigrid dataset from scratch of a rotating disk galaxy. We create a thin disk in the x-y midplane of the domain of three cells in height in either direction, and a radius of 10 kpc. The density and azimuthal velocity profiles of the disk as a function of radius will be given by the following functions:"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Density: $\\rho(r) \\propto r^{\\alpha}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Velocity: $v_{\\theta}(r) \\propto \\frac{r}{1+(r/r_0)^{\\beta}}$"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "where for simplicity we won't worry about the normalizations of these profiles. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "%matplotlib inline\n",
+      "from yt.mods import *\n",
+      "from yt.analysis_modules.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
+      "nx,ny,nz = (256,256,256)\n",
+      "R = 10. # kpc\n",
+      "r_0 = 3. # kpc\n",
+      "beta = 1.4\n",
+      "alpha = -1.\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates\n",
+      "dens = np.zeros((nx,ny,nz))\n",
+      "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
+      "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
+      "velx = np.zeros((nx,ny,nz))\n",
+      "vely = np.zeros((nx,ny,nz))\n",
+      "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "data[\"density\"] = (dens,\"g/cm**3\")\n",
+      "data[\"velocity_x\"] = (velx, \"km/s\")\n",
+      "data[\"velocity_y\"] = (vely, \"km/s\")\n",
+      "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
+      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To get a sense of what the data looks like, we'll take a slice through the middle of the disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc.set_log(\"velocity_x\", False)\n",
+      "slc.set_log(\"velocity_y\", False)\n",
+      "slc.set_log(\"velocity_magnitude\", False)\n",
+      "slc.set_unit(\"velocity_magnitude\", \"km/s\")\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Which shows a rotating disk with a specific density and velocity profile. Now, suppose we wanted to look at this disk galaxy from a certain orientation angle, and simulate a 3D FITS data cube where we can see the gas that is emitting at different velocities along the line of sight. We can do this using the `PPVCube` class. First, let's assume we rotate our viewing angle 60 degrees from face-on, from along the z-axis into the y-axis. We'll create a normal vector:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "i = 60.*np.pi/180.\n",
+      "L = [0.0,np.sin(i),np.sin(i)]"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Next, we need to specify a field that will serve as the \"intensity\" of the emission that we see. For simplicity, we'll simply choose the gas density as this field, though it could be any field (including derived fields) in principle. We also need to specify the dimensions of the data cube, and optionally we may choose the bounds in line-of-sight velocity that the data will be binned into. Otherwise, the bounds will simply be set to the negative and positive of the largest speed in the dataset."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Following this, we can now write this cube to a FITS file:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "cube.write_fits(\"cube.fits\", clobber=True, length_unit=(5.0,\"deg\"))"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Now, we'll look at the FITS dataset in yt and look at different slices along the velocity axis, which is the \"z\" axis:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "ds = load(\"cube.fits\")\n",
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# To figure out what the domain center and width is in pixel (code length) units:\n",
+      "print ds.domain_center\n",
+      "print ds.domain_width"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    }
+   ],
+   "metadata": {}
+  }
+ ]
+}
\ No newline at end of file
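
Since the cube is written as plain FITS, it can also be inspected outside of yt; a brief sketch with astropy (assumed to be installed, as yt's FITS machinery relies on it):

.. code-block:: python

   from astropy.io import fits

   f = fits.open('cube.fits')
   print f[0].data.shape        # numpy reverses FITS axis order: (velocity, Dec, RA)
   print f[0].header['NAXIS3']  # length of the line-of-sight velocity axis
   f.close()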

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:874e85c86cd80a516bb61775b566cd46766c60bdf8f865336bf9dd3505f83821"
+  "signature": "sha256:e4b5ea69687eb79452c16385b3a6f795b4572518dfa7f9d8a8125bd75b5fea85"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,9 +21,11 @@
      "input": [
       "%matplotlib inline\n",
       "from yt.mods import *\n",
-      "from yt.analysis_modules.api import ParticleTrajectories\n",
+      "import glob\n",
+      "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
-      "path = ytcfg.get(\"yt\", \"test_data_dir\")"
+      "path = ytcfg.get(\"yt\", \"test_data_dir\")\n",
+      "import matplotlib.pyplot as plt"
      ],
      "language": "python",
      "metadata": {},
@@ -75,8 +77,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(my_fns[0])\n",
-      "dd = pf.h.all_data()\n",
+      "ds = load(my_fns[0])\n",
+      "dd = ds.all_data()\n",
       "indices = dd[\"particle_index\"].astype(\"int\")\n",
       "print indices"
      ],
@@ -130,8 +132,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_position_x\"][0], trajs[\"particle_position_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_position_x\"][1], trajs[\"particle_position_y\"][1])"
+      "plt.plot(trajs[\"particle_position_x\"][0].ndarray_view(), trajs[\"particle_position_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_position_x\"][1].ndarray_view(), trajs[\"particle_position_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -148,8 +150,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_velocity_x\"][0], trajs[\"particle_velocity_y\"][0])\n",
-      "pylab.plot(trajs[\"particle_velocity_x\"][1], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_velocity_x\"][0].ndarray_view(), trajs[\"particle_velocity_y\"][0].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_velocity_x\"][1].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -166,8 +168,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_x\"][1])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_velocity_y\"][1])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_x\"][1].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_velocity_y\"][1].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -185,8 +187,8 @@
      "collapsed": false,
      "input": [
       "particle1 = trajs.trajectory_from_index(1)\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_x\"])\n",
-      "pylab.plot(particle1[\"particle_time\"], particle1[\"particle_position_y\"])"
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_x\"].ndarray_view())\n",
+      "plt.plot(particle1[\"particle_time\"].ndarray_view(), particle1[\"particle_position_y\"].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -203,8 +205,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "slc = SlicePlot(pf, \"x\", [\"Density\",\"Dark_Matter_Density\"], center=\"max\", width=(3.0, \"mpc\"))\n",
+      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "slc = SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
       "slc.show()"
      ],
      "language": "python",
@@ -222,7 +224,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "sp = pf.sphere(\"max\", (0.5, \"mpc\"))\n",
+      "sp = ds.sphere(\"max\", (0.5, \"Mpc\"))\n",
       "indices = sp[\"particle_index\"][sp[\"particle_type\"] == 1]"
      ],
      "language": "python",
@@ -240,7 +242,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.index\")\n",
+      "my_fns = glob.glob(path+\"/enzo_tiny_cosmology/DD*/*.hierarchy\")\n",
       "my_fns.sort()\n",
       "trajs = ParticleTrajectories(my_fns, indices)"
      ],
@@ -263,9 +265,12 @@
       "from mpl_toolkits.mplot3d import Axes3D\n",
       "fig = plt.figure(figsize=(8.0, 8.0))\n",
       "ax = fig.add_subplot(111, projection='3d')\n",
-      "ax.plot(trajs[\"particle_position_x\"][100], trajs[\"particle_position_z\"][100], trajs[\"particle_position_z\"][100])\n",
-      "ax.plot(trajs[\"particle_position_x\"][8], trajs[\"particle_position_z\"][8], trajs[\"particle_position_z\"][8])\n",
-      "ax.plot(trajs[\"particle_position_x\"][25], trajs[\"particle_position_z\"][25], trajs[\"particle_position_z\"][25])"
+      "ax.plot(trajs[\"particle_position_x\"][100].ndarray_view(), trajs[\"particle_position_z\"][100].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][100].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][8].ndarray_view(), trajs[\"particle_position_z\"][8].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][8].ndarray_view())\n",
+      "ax.plot(trajs[\"particle_position_x\"][25].ndarray_view(), trajs[\"particle_position_z\"][25].ndarray_view(), \n",
+      "        trajs[\"particle_position_z\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -282,9 +287,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"particle_position_x\"][25])"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"particle_position_x\"][25].ndarray_view())"
      ],
      "language": "python",
      "metadata": {},
@@ -301,7 +306,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.add_fields([\"Density\"])"
+      "trajs.add_fields([\"density\"])"
      ],
      "language": "python",
      "metadata": {},
@@ -311,17 +316,17 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "We also could have included `\"Density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
+      "We also could have included `\"density\"` in our original field list. Now, plot up the gas density for each particle as a function of time:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][100])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][8])\n",
-      "pylab.plot(trajs[\"particle_time\"], trajs[\"Density\"][25])\n",
-      "pylab.yscale(\"log\")"
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][100].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][8].ndarray_view())\n",
+      "plt.plot(trajs[\"particle_time\"].ndarray_view(), trajs[\"density\"][25].ndarray_view())\n",
+      "plt.yscale(\"log\")"
      ],
      "language": "python",
      "metadata": {},
@@ -338,29 +343,12 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "trajs.write_out(\"halo_trajectories.txt\")\n",
-      "trajs.write_out_h5(\"halo_trajectories.h5\")"
+      "trajs.write_out(\"halo_trajectories\") # This will write a separate file for each trajectory\n",
+      "trajs.write_out_h5(\"halo_trajectories.h5\") # This will write all trajectories to a single file"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 2,
-     "metadata": {},
-     "source": [
-      "Important Caveats"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "* Parallelization is not yet implemented.\n",
-      "* For large datasets, constructing trajectories can be very slow. We are working on optimizing the algorithm for a future release. \n",
-      "* At the moment, trajectories are limited for particles that exist in every dataset. Therefore, for codes like FLASH that allow for particles to exit the domain (and hence the simulation) for certain types of boundary conditions, you need to insure that the particles you wish to examine exist in all datasets in the time series from the beginning to the end. If this is not the case, `ParticleTrajectories` will throw an error. This is a limitation we hope to relax in a future release. "
-     ]
     }
    ],
    "metadata": {}

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,7 +58,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('RD0006/RD0006')
+  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
   halo_list = parallelHF(pf)
   halo_list.dump('MyHaloList')
 
@@ -69,7 +69,7 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('RD0006/RD0006')
+  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
   haloes = LoadHaloes(pf, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -1,14 +1,13 @@
 Halo Analysis
 =============
 
-Halo finding, mass functions, merger trees, and profiling.
+Using halo catalogs, understanding the different halo finding methods,
+and using the halo mass function.
 
 .. toctree::
    :maxdepth: 1
 
-   running_halofinder
+   halo_catalogs
+   halo_finding
    halo_mass_function
-   hmf_howto
-   merger_tree
-   halo_profiling
-   ellipsoid_analysis
+   halo_analysis_example

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -0,0 +1,4 @@
+Using HaloCatalogs to do Analysis
+---------------------------------
+
+.. notebook:: Halo_Analysis.ipynb

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -0,0 +1,229 @@
+
+Creating Halo Catalogs
+======================
+
+In yt 3.0, operations relating to the analysis of halos (halo finding,
+merger tree creation, and individual halo analysis) are all brought 
+together into a single framework. This framework is substantially
+different from the limited framework included in yt-2.x and is only 
+backwards compatible in that output from old halo finders may be loaded.
+
+A catalog of halos can be created from any initial dataset given to the
+halo catalog through data_pf. These halos can be found using friends-of-friends,
+HOP, and Rockstar. The finder_method keyword dictates which halo finder to
+use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
+details on the relative differences between these halo finders see 
+:ref:`halo_finding`.
+
+.. code-block:: python
+
+   from yt.mods import *
+   from yt.analysis_modules.halo_analysis.api import HaloCatalog
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+
+A halo catalog may also be created from already run rockstar outputs. 
+This method is not implemented for previously run friends-of-friends or 
+HOP finders. Even though rockstar creates one file per processor, 
+specifying any one file allows the full catalog to be loaded. Here we 
+only specify the file output by the processor with ID 0. Note that the 
+argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_pf=halos_pf)
+
+Although supplying only the binary output of the rockstar halo finder
+is sufficient for creating a halo catalog, it is not possible to compute
+any new information about the identified halos. To associate the halos
+with the dataset from which they were found, supply arguments to both
+halos_pf and data_pf.
+
+.. code-block:: python
+
+   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
+   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+
+A data container can also be supplied via the data_source keyword,
+associated with either dataset, to control the spatial region in
+which halo analysis will be performed.
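+
+For example, a minimal sketch restricting the analysis to a sphere (the
+center and radius here are arbitrary):
+
+.. code-block:: python
+
+   sphere = data_pf.sphere([0.5, 0.5, 0.5], (30.0, 'Mpc'))
+   hc = HaloCatalog(data_pf=data_pf, finder_method='hop',
+                    data_source=sphere)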
+
+Analysis Using Halo Catalogs
+============================
+
+Analysis is done by adding actions to the HaloCatalog. Each action is 
+represented by a callback function that will be run on each halo. 
+There are three types of actions:
+
+    - Filters
+    - Quantities
+    - Callbacks
+
+All interaction with this analysis framework is performed by importing
+from halo_analysis.
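+
+A minimal sketch of the imports assumed by the snippets below; the
+add_* registration helpers are assumed to be importable from the same
+api module as HaloCatalog:
+
+.. code-block:: python
+
+   from yt.analysis_modules.halo_analysis.api import \
+       HaloCatalog, add_callback, add_filter, add_quantity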
+
+Filters
+-------
+
+A filter is a function that returns True or False. If the return value
+is True, any further queued analysis will proceed and the halo in
+question will be added to the final catalog. If the return value is
+False, further analysis will not be performed and the halo will not be
+included in the final catalog.
+
+An example of adding a filter:
+
+.. code-block:: python
+
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+
+Currently quantity_value is the only available filter, but you can
+define your own by writing a function that accepts a halo object as
+its first argument and registering it as an available filter. If you
+think that your filter may be of use to the general community, you can
+add it to yt/analysis_modules/halo_analysis/halo_filters.py and issue a
+pull request.
+
+An example of defining your own filter:
+
+.. code-block:: python
+
+   def my_filter_function(halo):
+       
+       # Define condition for filter
+       filter_value = True
+       
+       # Return a boolean value 
+       return filter_value
+
+   # Add your filter to the filter registry
+   add_filter("my_filter", my_filter_function)
+
+   # ... Later on in your script
+   hc.add_filter("my_filter")
+
+Quantities
+----------
+
+A quantity is a callback that returns a value or values. The return values
+are stored within the halo object in a dictionary called “quantities.” At
+the end of the analysis, all of these quantities will be written to disk as
+the final form of the generated “halo catalog.”
+
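+Later actions can then read these stored values back off the halo
+object, as in this sketch (assuming the center_of_mass quantity
+discussed below has already been added):
+
+.. code-block:: python
+
+   def print_center(halo):
+       # center_of_mass is computed by an earlier add_quantity call
+       print halo.quantities['center_of_mass']
+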
+Quantities may be available in the initial fields found in the halo catalog,
+or calculated from a function after supplying a definition. Currently
+available quantities are center_of_mass and bulk_velocity. Their
+definitions are available in
+yt/analysis_modules/halo_analysis/halo_quantities.py. An example of
+defining your own quantity is shown below. If you think that your
+quantity may be of use to the general community, add it to
+halo_quantities.py and issue a pull request.
+
+An example of adding a quantity:
+
+.. code-block:: python
+
+   hc.add_quantity('center_of_mass')
+
+An example of defining your own quantity:
+
+.. code-block:: python
+
+   def my_quantity_function(halo):
+       # Define quantity to return
+       quantity = 5
+       
+       return quantity
+
+   # Add your quantity to the quantity registry
+   add_quantity('my_quantity', my_quantity_function)
+
+
+   # ... Later on in your script
+   hc.add_quantity("my_quantity") 
+
+Callbacks
+---------
+
+A callback is actually the superclass for quantities and filters and
+is a general purpose function that does something, anything, to a Halo
+object. This can include hanging new attributes off the Halo object,
+performing analysis and writing to disk, etc. A callback does not return
+anything.
+
+An example of using a pre-defined callback, where we create a sphere for
+each halo with a radius that is twice the saved “radius”:
+
+.. code-block:: python
+
+   hc.add_callback("sphere", factor=2.0)
+    
+Currently available callbacks are located in
+yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may
+be added by using the syntax shown below. If you think that your
+callback may be of use to the general community, add it to
+halo_callbacks.py and issue a pull request.
+
+An example of defining your own callback:
+
+.. code-block:: python
+
+   def my_callback_function(halo):
+       # Perform some callback actions here
+       x = 2
+       halo.x_val = x
+
+   # Add the callback to the callback registry
+   add_callback('my_callback', my_callback_function)
+
+
+   # ...  Later on in your script
+   hc.add_callback("my_callback")
+
+Running Analysis
+================
+
+After all callbacks, quantities, and filters have been added, the 
+analysis begins with a call to HaloCatalog.create.
+
+.. code-block:: python
+
+   hc.create()
+
+The save_halos keyword determines whether the actual Halo objects
+are saved after analysis on them has completed or whether just the
+contents of their quantities dicts will be retained for creating the
+final catalog. The looping over halos uses a call to parallel_objects,
+allowing the user to control how many processors work on each halo.
+The final catalog is written to disk in the output directory given
+when the HaloCatalog object was created.
+
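+For example, a minimal sketch that keeps only the contents of the
+quantities dicts (assuming the save_halos keyword described above):
+
+.. code-block:: python
+
+   hc.create(save_halos=False)
+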
+All callbacks, quantities, and filters are stored in an “actions” list,
+meaning that they are executed in the same order in which they were added.
+This enables the use of simple, reusable, single action callbacks that
+depend on each other. This also prevents unnecessary computation by allowing
+the user to add filters at multiple stages to skip remaining analysis if it
+is not warranted.
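+
+A sketch combining the actions shown above; the mass filter runs first,
+so halos that fail it skip the sphere and center-of-mass steps entirely:
+
+.. code-block:: python
+
+   hc.add_filter('quantity_value', 'particle_mass', '>', 1E13, 'Msun')
+   hc.add_callback('sphere', factor=2.0)
+   hc.add_quantity('center_of_mass')
+   hc.create()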
+
+Saving and Reloading Halo Catalogs
+==================================
+
+A HaloCatalog saved to disk can be reloaded as a yt dataset with the
+standard call to load. Any side data, such as profiles, can be reloaded
+with a load_profiles callback and a call to HaloCatalog.load.
+
+.. code-block:: python
+
+   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_pf=hpf,
+                    output_dir="halo_catalogs/catalog_0046")
+   hc.add_callback("load_profiles", output_dir="profiles",
+                   filename="virial_profiles")
+   hc.load()
+
+Summary
+=======
+
+For a full example of how to use these methods together see 
+:ref:`halo_analysis_example`.

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/ppv_cubes.rst
--- /dev/null
+++ b/doc/source/analyzing/analysis_modules/ppv_cubes.rst
@@ -0,0 +1,4 @@
+Creating Position-Position-Velocity FITS Cubes
+----------------------------------------------
+
+.. notebook:: PPVCube.ipynb

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/analysis_modules/synthetic_observation.rst
--- a/doc/source/analyzing/analysis_modules/synthetic_observation.rst
+++ b/doc/source/analyzing/analysis_modules/synthetic_observation.rst
@@ -18,3 +18,4 @@
    sunyaev_zeldovich
    radial_column_density
    photon_simulator
+   ppv_cubes

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/particles.rst
--- a/doc/source/analyzing/particles.rst
+++ b/doc/source/analyzing/particles.rst
@@ -28,8 +28,7 @@
 the quantities (:ref:`derived-quantities`) in those objects will operate on
 particle fields.
 
-(For information on halo finding, see :ref:`cookbook-halo_finding` and
-:ref:`cookbook-halo_mass_info`.)
+(For information on halo finding, see :ref:`cookbook-halo_finding`)
 
 .. warning:: If you use the built-in methods of interacting with particles, you
              should be well off.  Otherwise, there are caveats!

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:b7541e0167001c6dd74306c8490385ace7bdb0533a829286f0505c0b24c67f16"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -291,6 +296,166 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "Round-Trip Conversions to and from AstroPy's Units System"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, a `YTArray` or `YTQuantity` may be converted to an [AstroPy quantity](http://astropy.readthedocs.org/en/latest/units/), which is a NumPy array or a scalar associated with units from AstroPy's units system. You may use this facility if you have AstroPy installed. "
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Some examples of converting from AstroPy units to yt:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from astropy import units as u\n",
+      "x = 42.0 * u.meter\n",
+      "y = YTQuantity(x)\n",
+      "y2 = YTQuantity.from_astropy(x) # Another way to create the quantity"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print x, type(x)\n",
+      "print y, type(y)\n",
+      "print y2, type(y2)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = np.random.random(size=10) * u.km/u.s\n",
+      "b = YTArray(a)\n",
+      "b2 = YTArray.from_astropy(a) # Another way to create the quantity"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print a, type(a)\n",
+      "print b, type(b)\n",
+      "print b2, type(b2)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "It also works the other way around, converting a `YTArray` or `YTQuantity` to an AstroPy quantity via the method `to_astropy`. For arrays:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "temp = dd[\"temperature\"]\n",
+      "atemp = temp.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print temp, type(temp)\n",
+      "print atemp, type(atemp)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "and quantities:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "from yt.utilities.physical_constants import kboltz\n",
+      "kb = kboltz.to_astropy()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print kboltz, type(kboltz)\n",
+      "print kb, type(kb)"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "As a sanity check, you can show that it works round-trip:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "k1 = kboltz.to_astropy()\n",
+      "k2 = YTQuantity(kb)\n",
+      "print k1 == k2"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "c = YTArray(a)\n",
+      "d = c.to_astropy()\n",
+      "print a == d"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/cookbook/aligned_cutting_plane.py
--- a/doc/source/cookbook/aligned_cutting_plane.py
+++ b/doc/source/cookbook/aligned_cutting_plane.py
@@ -1,18 +1,18 @@
-from yt.mods import *
+import yt
 
 # Load the dataset.
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-# Create a 1 kpc radius sphere, centered on the max density.  Note that this
-# sphere is very small compared to the size of our final plot, and it has a
-# non-axially aligned L vector.
-sp = pf.sphere("center", (15.0, "kpc"))
+# Create a 1 kpc radius sphere, centered on the maximum gas density.  Note
+# that this sphere is very small compared to the size of our final plot,
+# and it has a non-axially aligned L vector.
+sp = ds.sphere("m", (1.0, "kpc"))
 
 # Get the angular momentum vector for the sphere.
 L = sp.quantities.angular_momentum_vector()
 
-print "Angular momentum vector: %s" % (L)
+print "Angular momentum vector: {0}".format(L)
 
 # Create an OffAxisSlicePlot on the object with the L vector as its normal
-p = OffAxisSlicePlot(pf, L, "density", sp.center, (25, "kpc"))
+p = yt.OffAxisSlicePlot(ds, L, "density", sp.center, (15, "kpc"))
 p.save()

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -1,26 +1,28 @@
-## Using AMRKDTree Homogenized Volumes to examine large datasets at lower resolution.
+# Using AMRKDTree Homogenized Volumes to examine large datasets
+# at lower resolution.
 
 # In this example we will show how to use the AMRKDTree to take a simulation
 # with 8 levels of refinement and only use levels 0-3 to render the dataset.
 
 # We begin by loading up yt, and importing the AMRKDTree
+import numpy as np
 
-from yt.mods import *
+import yt
 from yt.utilities.amr_kdtree.api import AMRKDTree
 
 # Load up a data and print out the maximum refinement level
-pf = load('IsolatedGalaxy/galaxy0030/galaxy0030')
+ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
 
-kd = AMRKDTree(pf)
+kd = AMRKDTree(ds)
 # Print out the total volume of all the bricks
 print kd.count_volume()
 # Print out the number of cells
 print kd.count_cells()
 
-tf = ColorTransferFunction((-30, -22))
-cam = pf.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
-                 tf, volume=kd)
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
+tf = yt.ColorTransferFunction((-30, -22))
+cam = ds.h.camera([0.5, 0.5, 0.5], [0.2, 0.3, 0.4], 0.10, 256,
+                  tf, volume=kd)
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], colormap='RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
 # This rendering is okay, but lets say I'd like to improve it, and I don't want
@@ -28,7 +30,7 @@
 # generate a low resolution version of the AMRKDTree and pass that in to the
 # camera.  We do this by specifying a maximum refinement level of 3.
 
-kd_low_res = AMRKDTree(pf, l_max=3)
+kd_low_res = AMRKDTree(ds, max_level=3)
 print kd_low_res.count_volume()
 print kd_low_res.count_cells()
 
@@ -42,21 +44,21 @@
 # rendering until we find something we like.
 
 tf.clear()
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=np.ones(4, dtype='float64'), colormap='RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
 
-tf.grey_opacity=True
+tf.grey_opacity = True
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # That seemed to pick out some interesting structures.  Now let's bump up the
 # opacity.
 
 tf.clear()
-tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5],
+              alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now lets go back to the full resolution AMRKDTree
@@ -65,4 +67,3 @@
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # This looks great!
-

diff -r ab0d4e039c6fb28651f1b4b1fafd54ee0b08c27b -r 33efc5e11a01e4553fe25c1e2056c48e973e687c doc/source/cookbook/average_value.py
--- a/doc/source/cookbook/average_value.py
+++ b/doc/source/cookbook/average_value.py
@@ -1,12 +1,12 @@
-from yt.mods import *
+import yt
 
-pf = load("IsolatedGalaxy/galaxy0030/galaxy0030") # load data
+ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # load data
 
 field = "temperature"  # The field to average
-weight = "cell_mass" # The weight for the average
+weight = "cell_mass"  # The weight for the average
 
-dd = pf.h.all_data() # This is a region describing the entire box,
-                     # but note it doesn't read anything in yet!
+dd = ds.h.all_data()  # This is a region describing the entire box,
+                      # but note it doesn't read anything in yet!
 # We now use our 'quantities' call to get the average quantity
 average_value = dd.quantities["WeightedAverageQuantity"](field, weight)
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/3c8094a821a5/
Changeset:   3c8094a821a5
Branch:      yt-3.0
User:        bcrosby
Date:        2014-05-12 20:58:16
Summary:     Stupid typo. hfm and hmf are not the same.
Affected #:  1 file

diff -r 33efc5e11a01e4553fe25c1e2056c48e973e687c -r 3c8094a821a5436e66040cb94c07026c9a5cf81a yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -76,7 +76,7 @@
 def test_halo_mass_function():
 	hds = data_dir_load(hds0)
 	yield assert_equal, str(hds), "halos_0.0.bin"
-	for test in hfm_sim_and_analytic(hds0):
+	for test in hmf_sim_and_analytic(hds0):
 		test_halo_mass_function.__name__ = test.description
 		yield test
 


https://bitbucket.org/yt_analysis/yt/commits/c7c77cffefa4/
Changeset:   c7c77cffefa4
Branch:      yt-3.0
User:        bcrosby
Date:        2014-05-12 21:42:12
Summary:     Now the test is actually imported
Affected #:  1 file

diff -r 3c8094a821a5436e66040cb94c07026c9a5cf81a -r c7c77cffefa4ecc7b04971269a58c4b4b560f493 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -18,7 +18,8 @@
     requires_pf, \
     small_patch_amr, \
     big_patch_amr, \
-    data_dir_load
+    data_dir_load, \
+    hmf_sim_and_analytic
 from yt.frontends.enzo.api import EnzoDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",


https://bitbucket.org/yt_analysis/yt/commits/d163d5f4389c/
Changeset:   d163d5f4389c
Branch:      yt-3.0
User:        bcrosby
Date:        2014-07-27 21:22:04
Summary:     Unsuccessful attempt at getting testing to work.
Affected #:  2 files

diff -r c7c77cffefa4ecc7b04971269a58c4b4b560f493 -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -75,11 +75,12 @@
 @requires_pf(hds0)
 @requires_pf(hds1)
 def test_halo_mass_function():
-	hds = data_dir_load(hds0)
-	yield assert_equal, str(hds), "halos_0.0.bin"
-	for test in hmf_sim_and_analytic(hds0):
-		test_halo_mass_function.__name__ = test.description
-		yield test
+    hds = data_dir_load(hds0)
+    yield assert_equal, str(hds), "halos_0.0.bin"
+    for test in hmf_sim_and_analytic(hds):
+    #    print "test.description: ", test.description
+    #    test_halo_mass_function.__name__ = test.description
+       yield test
 
 ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_pf(ecp, big_data=True)

diff -r c7c77cffefa4ecc7b04971269a58c4b4b560f493 -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -578,12 +578,15 @@
 
 class HaloMassFunctionTest(AnswerTestingTest):
     _type_name = "HaloMassFunction"
-    _attrs = ()
+    _attrs = ('halos_ds')
+    def __init__(self, halos_ds):
+        super(HaloMassFunctionTest, self).__init__(halos_ds)
+        self.halos_ds = halos_ds
 
     def run(self):
-        from yt.analysis_modules.halo_mass_function.api import *
         result = {}
-        hmf = HaloMassFunction(halos_ds=self.hc)
+        from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
+        hmf = HaloMassFcn(halos_ds=self.halos_ds)
         result["masses_sim"] = hmf.masses_sim
         result["n_cumulative_sim"] = hmf.n_cumulative_sim
         result["masses_analytic"] = hmf.masses_analytic
@@ -593,15 +596,15 @@
 
     def compare(self, new_result, old_result):
         for newms, oldms in zip(new_result['masses_sim'], old_result['masses_sim']):
-            assert(newms, oldms)
+            assert(newms == oldms)
         for newncs, oldncs in zip(new_result['n_cumulative_sim'], old_result['n_cumulative_sim']):
-            assert(newncs, oldncs)
+            assert(newncs == oldncs)
         for newma, oldma in zip(new_result['masses_analytic'], old_result['masses_analytic']):
-            assert(newma, oldma)
+            assert(newma == oldma)
         for newnca, oldnca in zip(new_result['n_cumulative_analytic'], old_result['n_cumulative_analytic']):
-            assert(newnca, oldnca)
+            assert(newnca == oldnca)
         for newdndmdma, olddndmdma in zip(new_result['dndM_dM_analytic'], old_result['dndM_dM_analytic']):
-            assert(newdndmdma, olddndmdma)
+            assert(newdndmdma == olddndmdma)
 
 def compare_image_lists(new_result, old_result, decimals):
     fns = ['old.png', 'new.png']


https://bitbucket.org/yt_analysis/yt/commits/a85d0f72d678/
Changeset:   a85d0f72d678
Branch:      yt-3.0
User:        bcrosby
Date:        2014-07-27 21:42:31
Summary:     merged
Affected #:  547 files

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -7,6 +7,7 @@
 rockstar.cfg
 yt_updater.log
 yt/frontends/artio/_artio_caller.c
+yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.c
 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
 yt/frontends/ramses/_ramses_reader.cpp
 yt/frontends/sph/smoothing_kernel.c
@@ -41,6 +42,7 @@
 yt/utilities/lib/PointsInVolume.c
 yt/utilities/lib/QuadTree.c
 yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/ragged_arrays.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
 yt/utilities/lib/GridTree.c

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -2,15 +2,21 @@
 
 Contributors:   
                 Tom Abel (tabel at stanford.edu)
-                David Collins (dcollins at physics.ucsd.edu)
+                Gabriel Altay (gabriel.altay at gmail.com)
+                Kenza Arraki (karraki at gmail.com)
+                Alex Bogert (fbogert at ucsc.edu)
+                David Collins (dcollins4096 at gmail.com)
                 Brian Crosby (crosby.bd at gmail.com)
                 Andrew Cunningham (ajcunn at gmail.com)
+                Miguel de Val-Borro (miguel.deval at gmail.com)
                 Hilary Egan (hilaryye at gmail.com)
                 John Forces (jforbes at ucolick.org)
+                Sam Geen (samgeen at gmail.com)
                 Nathan Goldbaum (goldbaum at ucolick.org)
                 Markus Haider (markus.haider at uibk.ac.at)
                 Cameron Hummels (chummels at gmail.com)
                 Christian Karch (chiffre at posteo.de)
+                Ben W. Keller (kellerbw at mcmaster.ca)
                 Ji-hoon Kim (me at jihoonkim.org)
                 Steffen Klemer (sklemer at phys.uni-goettingen.de)
                 Kacper Kowalik (xarthisius.kk at gmail.com)
@@ -21,18 +27,23 @@
                 Chris Malone (chris.m.malone at gmail.com)
                 Josh Maloney (joshua.moloney at colorado.edu)
                 Chris Moody (cemoody at ucsc.edu)
+                Stuart Mumford (stuart at mumford.me.uk)
                 Andrew Myers (atmyers at astro.berkeley.edu)
                 Jill Naiman (jnaiman at ucolick.org)
+                Desika Narayanan (dnarayan at haverford.edu)
                 Kaylea Nelson (kaylea.nelson at yale.edu)
                 Jeff Oishi (jsoishi at gmail.com)
+                Brian O'Shea (bwoshea at gmail.com)
                 Jean-Claude Passy (jcpassy at uvic.ca)
+                John Regan (john.regan at helsinki.fi)
                 Mark Richardson (Mark.L.Richardson at asu.edu)
                 Thomas Robitaille (thomas.robitaille at gmail.com)
                 Anna Rosen (rosen at ucolick.org)
                 Douglas Rudd (drudd at uchicago.edu)
                 Anthony Scopatz (scopatz at gmail.com)
                 Noel Scudder (noel.scudder at stonybrook.edu)
-                Devin Silvia (devin.silvia at colorado.edu)
+                Pat Shriwise (shriwise at wisc.edu)
+                Devin Silvia (devin.silvia at gmail.com)
                 Sam Skillman (samskillman at gmail.com)
                 Stephen Skory (s at skory.us)
                 Britton Smith (brittonsmith at gmail.com)
@@ -42,8 +53,10 @@
                 Stephanie Tonnesen (stonnes at gmail.com)
                 Matthew Turk (matthewturk at gmail.com)
                 Rich Wagner (rwagner at physics.ucsd.edu)
+                Michael S. Warren (mswarren at gmail.com)
                 Andrew Wetzel (andrew.wetzel at yale.edu)
                 John Wise (jwise at physics.gatech.edu)
+                Michael Zingale (michael.zingale at stonybrook.edu)
                 John ZuHone (jzuhone at gmail.com)
 
 Several items included in the yt/extern directory were written by other

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -12,4 +12,3 @@
 prune tests
 graft yt/gui/reason/html/resources
 exclude clean.sh .hgchurn
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/README
--- a/doc/README
+++ b/doc/README
@@ -5,6 +5,6 @@
 http://sphinx.pocoo.org/
 
 Because the documentation requires a number of dependencies, we provide
-pre-build versions online, accessible here:
+pre-built versions online, accessible here:
 
-http://yt-project.org/docs/
+http://yt-project.org/docs/dev-3.0/

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/cheatsheet.tex
--- a/doc/cheatsheet.tex
+++ b/doc/cheatsheet.tex
@@ -3,7 +3,7 @@
 \usepackage{calc}
 \usepackage{ifthen}
 \usepackage[landscape]{geometry}
-\usepackage[colorlinks = true, linkcolor=blue, citecolor=blue, urlcolor=blue]{hyperref}
+\usepackage[hyphens]{url}
 
 % To make this come out properly in landscape mode, do one of the following
 % 1.
@@ -101,9 +101,13 @@
 Documentation \url{http://yt-project.org/doc/index.html}.
 Need help? Start here \url{http://yt-project.org/doc/help/} and then
 try the IRC chat room \url{http://yt-project.org/irc.html},
-or the mailing list \url{http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org}.
-{\bf Installing yt:} The easiest way to install yt is to use the installation script
-found on the yt homepage or the docs linked above.
+or the mailing list \url{http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org}. \\
+
+\subsection{Installing yt} The easiest way to install yt is to use the
+installation script found on the yt homepage or the docs linked above.  If you
+already have python set up with \texttt{numpy}, \texttt{scipy},
+\texttt{matplotlib}, \texttt{h5py}, and \texttt{cython}, you can also use
+\texttt{pip install yt}
 
 \subsection{Command Line yt}
 yt, and its convenience functions, are launched from a command line prompt.
@@ -118,9 +122,8 @@
 \texttt{yt stats} {\it dataset} \textemdash\ Print stats of a dataset. \\
 \texttt{yt update} \textemdash\ Update yt to most recent version.\\
 \texttt{yt update --all} \textemdash\ Update yt and dependencies to most recent version. \\
-\texttt{yt instinfo} \textemdash\ yt installation information. \\
+\texttt{yt version} \textemdash\ yt installation information. \\
 \texttt{yt notebook} \textemdash\ Run the IPython notebook server. \\
-\texttt{yt serve} ({\it dataset}) \textemdash\  Run yt-specific web GUI ({\it dataset} is optional).\\
 \texttt{yt upload\_image} {\it image.png} \textemdash\ Upload PNG image to imgur.com. \\
 \texttt{yt upload\_notebook} {\it notebook.nb} \textemdash\ Upload IPython notebook to hub.yt-project.org.\\
 \texttt{yt plot} {\it dataset} \textemdash\ Create a set of images.\\
@@ -132,16 +135,8 @@
  paste.yt-project.org. \\ 
 \texttt{yt pastebin\_grab} {\it identifier} \textemdash\ Print content of pastebin to
  STDOUT. \\
- \texttt{yt hub\_register} \textemdash\ Register with
-hub.yt-project.org. \\
-\texttt{yt hub\_submit} \textemdash\ Submit hg repo to
-hub.yt-project.org. \\
-\texttt{yt bootstrap\_dev} \textemdash\ Bootstrap a yt 
-development environment. \\
 \texttt{yt bugreport} \textemdash\ Report a yt bug. \\
 \texttt{yt hop} {\it dataset} \textemdash\  Run hop on a dataset. \\
-\texttt{yt rpdb} \textemdash\ Connect to running rpd 
- session. 
 
 \subsection{yt Imports}
 In order to use yt, Python must load the relevant yt modules into memory.
@@ -149,37 +144,40 @@
 used as part of a script.
 \newlength{\MyLen}
 \settowidth{\MyLen}{\texttt{letterpaper}/\texttt{a4paper} \ }
-\texttt{from yt.mods import \textasteriskcentered}  \textemdash\ 
-Load base yt  modules. \\
+\texttt{import yt}  \textemdash\ 
+Load yt. \\
 \texttt{from yt.config import ytcfg}  \textemdash\ 
 Used to set yt configuration options.
- If used, must be called before importing any other module.\\
-\texttt{from yt.analysis\_modules.api import \textasteriskcentered}   \textemdash\ 
-Load all yt analysis modules. \\
+If used, must be called before importing any other module.\\
 \texttt{from yt.analysis\_modules.\emph{halo\_finding}.api import \textasteriskcentered}  \textemdash\ 
 Load halo finding modules. Other modules
 are loaded in a similar way by swapping the 
 {\em emphasized} text.
 See the \textbf{Analysis Modules} section for a listing and short descriptions of each.
 
-\subsection{Numpy Arrays}
-Simulation data in yt is returned in Numpy arrays. The Numpy package provides a wealth of built-in
-functions that operate on Numpy arrays. Here is a very brief list of some useful ones.
-Please see \url{http://docs.scipy.org/doc/numpy/reference/} for the full
-numpy documentation.\\
-\settowidth{\MyLen}{\texttt{multicol} }
+\subsection{YTArray}
+Simulation data in yt is returned as a YTArray.  YTArray is a numpy array that
+has unit data attached to it and can automatically handle unit conversions and
+detect unit errors. Just like a numpy array, YTArray provides a wealth of
+built-in functions to calculate properties of the data in the array. Here is a
+very brief list of some useful ones.
+\settowidth{\MyLen}{\texttt{multicol} }\\
+\texttt{v = a.in\_cgs()} \textemdash\ Return the array in CGS units \\
+\texttt{v = a.in\_units('Msun/pc**3')} \textemdash\ Return the array in solar masses per cubic parsec \\ 
 \texttt{v = a.max(), a.min()} \textemdash\ Return maximum, minimum of \texttt{a}. \\
-\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max, 
+\texttt{index = a.argmax(), a.argmin()} \textemdash\ Return index of max,
 min value of \texttt{a}.\\
 \texttt{v = a[}{\it index}\texttt{]} \textemdash\ Select a single value from \texttt{a} at location {\it index}.\\
-\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from \texttt{a} between
+\texttt{b = a[}{\it i:j}\texttt{]} \textemdash\ Select the slice of values from
+\texttt{a} between
 locations {\it i} to {\it j-1} saved to a new Numpy array \texttt{b} with length {\it j-i}. \\
-\texttt{sel = (a > const)}  \textemdash\ Create a new boolean Numpy array \texttt{sel}, of the same shape as \texttt{a},
+\texttt{sel = (a > const)} \textemdash\ Create a new boolean Numpy array
+\texttt{sel}, of the same shape as \texttt{a},
 that marks which values of \texttt{a > const}. Other operators (e.g. \textless, !=, \%) work as well.\\
-\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of elements from \texttt{a} that correspond to elements of \texttt{sel}
+\texttt{b = a[sel]} \textemdash\ Create a new Numpy array \texttt{b} made up of
+elements from \texttt{a} that correspond to elements of \texttt{sel}
 that are {\it True}. In the above example \texttt{b} would be all elements of \texttt{a} that are greater than \texttt{const}.\\
-\texttt{a.dump({\it filename.dat})} \textemdash\ Save \texttt{a} to the binary file {\it filename.dat}.\\
-\texttt{a = np.load({\it filename.dat})} \textemdash\ Load the contents of {\it filename.dat} into \texttt{a}.
+\texttt{a.write\_hdf5({\it filename.h5})} \textemdash\ Save \texttt{a} to the hdf5 file {\it filename.h5}.\\
 
 \subsection{IPython Tips}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -196,6 +194,7 @@
 \texttt{\%hist} \textemdash\ Print recent command history.\\
 \texttt{\%quickref} \textemdash\ Print IPython quick reference.\\
 \texttt{\%pdb} \textemdash\ Automatically enter the Python debugger at an exception.\\
+\texttt{\%debug} \textemdash\ Drop into a debugger at the location of the last unhandled exception. \\
 \texttt{\%time, \%timeit} \textemdash\ Find running time of expressions for benchmarking.\\
 \texttt{\%lsmagic} \textemdash\ List all available IPython magics. Hint: \texttt{?} works with magics.\\
 
@@ -208,68 +207,52 @@
 After that, simulation data is generally accessed in yt using {\it Data Containers} which are Python objects
 that define a region of simulation space from which data should be selected.
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{pf = load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
-\texttt{dd = pf.h.all\_data()} \textemdash\ Select the entire volume.\\
-\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Saves the contents of {\it field} into the
-numpy array \texttt{a}. Similarly for other data containers.\\
-\texttt{pf.h.field\_list} \textemdash\ A list of available fields in the snapshot. \\
-\texttt{pf.h.derived\_field\_list} \textemdash\ A list of available derived fields
+\texttt{ds = yt.load(}{\it dataset}\texttt{)} \textemdash\   Reference a single snapshot.\\
+\texttt{dd = ds.all\_data()} \textemdash\ Select the entire volume.\\
+\texttt{a = dd[}{\it field\_name}\texttt{]} \textemdash\ Copies the contents of {\it field} into the
+YTArray \texttt{a}. Similarly for other data containers.\\
+\texttt{ds.field\_list} \textemdash\ A list of available fields in the snapshot. \\
+\texttt{ds.derived\_field\_list} \textemdash\ A list of available derived fields
 in the snapshot. \\
-\texttt{val, loc = pf.h.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
+\texttt{val, loc = ds.find\_max("Density")} \textemdash\ Find the \texttt{val}ue of
 the maximum of the field \texttt{Density} and its \texttt{loc}ation. \\
-\texttt{sp = pf.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
+\texttt{sp = ds.sphere(}{\it cen}\texttt{,}{\it radius}\texttt{)} \textemdash\   Create a spherical data 
 container. {\it cen} may be a coordinate, or ``max'' which 
 centers on the max density point. {\it radius} may be a float in 
 code units or a tuple of ({\it length, unit}).\\
 
-\texttt{re = pf.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
+\texttt{re = ds.region({\it cen}, {\it left edge}, {\it right edge})} \textemdash\ Create a
 rectilinear data container. {\it cen} is required but not used.
 {\it left} and {\it right edge} are coordinate values that define the region.
 
-\texttt{di = pf.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
+\texttt{di = ds.disk({\it cen}, {\it normal}, {\it radius}, {\it height})} \textemdash\ 
 Create a cylindrical data container centered at {\it cen} along the 
 direction set by {\it normal},with total length
  2$\times${\it height} and with radius {\it radius}. \\
  
- \texttt{bl = pf.boolean({\it constructor})} \textemdash\ Create a boolean data
- container. {\it constructor} is a list of pre-defined non-boolean 
- data containers with nested boolean logic using the
- ``AND'', ``NOT'', or ``OR'' operators. E.g. {\it constructor=}
- {\it [sp, ``NOT'', (di, ``OR'', re)]} gives a volume defined
- by {\it sp} minus the patches covered by {\it di} and {\it re}.\\
- 
-\texttt{pf.h.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
-\texttt{sp = pf.h.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
+\texttt{ds.save\_object(sp, {\it ``sp\_for\_later''})} \textemdash\ Save an object (\texttt{sp}) for later use.\\
+\texttt{sp = ds.load\_object({\it ``sp\_for\_later''})} \textemdash\ Recover a saved object.\\
 
 
-\subsection{Defining New Fields \& Quantities}
-\texttt{yt} expects on-disk fields, fields generated on-demand and in-memory. Quantities reduce a field (e.g. "Density") defined over an object (e.g. "sphere") to get a single value (e.g. "Mass"). \\
-\texttt{def \_MetalMassMsun({\it field},{\it data})}\\
-\texttt{\hspace{4 mm} return data["Metallicity"]*data["CellMassMsun"]}\\
-\texttt{add\_field("MetalMassMsun",function=\_MetalMassMsun)}\\
-Define a new quantity; note the first function operates on grids and data objects and the second on the results of the first. \\
-\texttt{def \_TotalMass(data): }\\
-\texttt{\hspace{4 mm} baryon\_mass = data["CellMassMsun"].sum()}\\
-\texttt{\hspace{4 mm} particle\_mass = data["ParticleMassMsun"].sum()}\\
-\texttt{\hspace{4 mm} return baryon\_mass, particle\_mass}\\
-\texttt{def \_combTotalMass(data, baryon\_mass, particle\_mass):}\\
-\texttt{\hspace{4 mm} return baryon\_mass.sum() + particle\_mass.sum()}\\
-\texttt{add\_quantity("TotalMass", function=\_TotalMass,}\\
-\texttt{\hspace{4 mm} combine\_function=\_combTotalMass, n\_ret = 2)}\\
-
-
+\subsection{Defining New Fields}
+\texttt{yt} handles on-disk fields, fields generated on-demand, and in-memory fields. 
+Fields can either be created before a dataset is loaded using \texttt{add\_field}:
+\texttt{def \_metal\_mass({\it field},{\it data})}\\
+\texttt{\hspace{4 mm} return data["metallicity"]*data["cell\_mass"]}\\
+\texttt{add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
+Or added to an existing dataset using \texttt{ds.add\_field}:
+\texttt{ds.add\_field("metal\_mass", units='g', function=\_metal\_mass)}\\
 
 \subsection{Slices and Projections}
 \settowidth{\MyLen}{\texttt{multicol} }
-\texttt{slc = SlicePlot(pf, {\it axis}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
-perpendicular to {\it axis} of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
-{\it width} in code units or a (value, unit) tuple. Hint: try {\it SlicePlot?} in IPython to see additional parameters.\\
+\texttt{slc = yt.SlicePlot(ds, {\it axis or normal vector}, {\it field}, {\it center=}, {\it width=}, {\it weight\_field=}, {\it additional parameters})} \textemdash\ Make a slice plot
+perpendicular to {\it axis} (specified via 'x', 'y', or 'z') or a normal vector for an off-axis slice of {\it field} weighted by {\it weight\_field} at (code-units) {\it center} with 
+{\it width} in code units or a (value, unit) tuple. Hint: try {\it yt.SlicePlot?} in IPython to see additional parameters.\\
 \texttt{slc.save({\it file\_prefix})} \textemdash\ Save the slice to a png with name prefix {\it file\_prefix}.
 \texttt{.save()} works similarly for the commands below.\\
 
-\texttt{prj = ProjectionPlot(pf, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
-\texttt{prj = OffAxisSlicePlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off-axis slice. Note this takes an array of fields. \\
-\texttt{prj = OffAxisProjectionPlot(pf, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
+\texttt{prj = yt.ProjectionPlot(ds, {\it axis}, {\it field}, {\it addit. params})} \textemdash\ Make a projection. \\
+\texttt{prj = yt.OffAxisProjectionPlot(ds, {\it normal}, {\it fields}, {\it center=}, {\it width=}, {\it depth=},{\it north\_vector=},{\it weight\_field=})} \textemdash Make an off axis projection. Note this takes an array of fields. \\
 
 \subsection{Plot Annotations}
 \settowidth{\MyLen}{\texttt{multicol} }
@@ -299,51 +282,37 @@
 The \texttt{my\_plugins.py} file \textemdash\ Add functions, derived fields, constants, or other commonly-used Python code to yt.
 
 
-
-
 \subsection{Analysis Modules}
 \settowidth{\MyLen}{\texttt{multicol}}
 The import name for each module is listed at the end of each description (see \textbf{yt Imports}).
 
 \texttt{Absorption Spectrum} \textemdash\ (\texttt{absorption\_spectrum}). \\
 \texttt{Clump Finder} \textemdash\ Find clumps defined by density thresholds (\texttt{level\_sets}). \\
-\texttt{Coordinate Transformation} \textemdash\ (\texttt{coordinate\_transformation}). \\
 \texttt{Halo Finding} \textemdash\ Locate halos of dark matter particles (\texttt{halo\_finding}). \\
-\texttt{Halo Mass Function} \textemdash\ Find halo mass functions from data and from theory (\texttt{halo\_mass\_function}). \\
-\texttt{Halo Profiling} \textemdash\ Profile and project multiple halos (\texttt{halo\_profiler}). \\
-\texttt{Halo Merger Tree} \textemdash\ Create a database of halo mergers (\texttt{halo\_merger\_tree}). \\
 \texttt{Light Cone Generator} \textemdash\ Stitch datasets together to perform analysis over cosmological volumes. \\
 \texttt{Light Ray Generator} \textemdash\ Analyze the path of light rays.\\
-\texttt{Radial Column Density} \textemdash\ Calculate column densities around a point (\texttt{radial\_column\_density}). \\
 \texttt{Rockstar Halo Finding} \textemdash\ Locate halos of dark matter using the Rockstar halo finder (\texttt{halo\_finding.rockstar}). \\
 \texttt{Star Particle Analysis} \textemdash\ Analyze star formation history and assemble spectra (\texttt{star\_analysis}). \\
 \texttt{Sunrise Exporter} \textemdash\ Export data to the sunrise visualization format (\texttt{sunrise\_export}). \\
-\texttt{Two Point Functions} \textemdash\ Two point correlations (\texttt{two\_point\_functions}). \\
 
 
 \subsection{Parallel Analysis}
-\settowidth{\MyLen}{\texttt{multicol}}
-Nearly all of yt is parallelized using MPI.
-The {\it mpi4py} package must be installed for parallelism in yt.
-To install {\it pip install mpi4py} on the command line usually works.
+\settowidth{\MyLen}{\texttt{multicol}} 
+Nearly all of yt is parallelized using
+MPI.  The {\it mpi4py} package must be installed for parallelism in yt.  To
+install it, running {\it pip install mpi4py} on the command line usually works.
 Execute python in parallel similar to this:\\
-{\it mpirun -n 12 python script.py --parallel}\\
-This command may differ for each system on which you use yt;
-please consult the system documentation for details on how to run parallel applications.
+{\it mpirun -n 12 python script.py}\\
+The file \texttt{script.py} must call \texttt{yt.enable\_parallelism()} to
+turn on yt's parallelism.  If this doesn't happen, all cores will execute the
+same serial yt script.  This command may differ for each system on which you use
+yt; please consult the system documentation for details on how to run parallel
+applications.
 
-\texttt{from yt.pmods import *} \textemdash\ Load yt faster when in parallel.
-This replaces the usual \texttt{from yt.mods import *}.\\
 \texttt{parallel\_objects()} \textemdash\ A way to parallelize analysis over objects
 (such as halos or clumps).\\
 
 
-\subsection{Pre-Installed Versions}
-\settowidth{\MyLen}{\texttt{multicol}}
-yt is pre-installed on several supercomputer systems.
-
-\textbf{NICS Kraken} \textemdash\ {\it module load yt} \\
-
-
 \subsection{Mercurial}
 \settowidth{\MyLen}{\texttt{multicol}}
 Please see \url{http://mercurial.selenic.com/} for the full Mercurial documentation.
@@ -365,8 +334,7 @@
 \subsection{FAQ}
 \settowidth{\MyLen}{\texttt{multicol}}
 
-\texttt{pf.field\_info[`field'].take\_log = False} \textemdash\ When plotting \texttt{field}, do not take log.
-Must enter \texttt{pf.h} before this command. \\
+\texttt{slc.set\_log('field', False)} \textemdash\ When plotting \texttt{field}, use linear scaling instead of log scaling.
 
 
 %\rule{0.3\linewidth}{0.25pt}
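
As a minimal sketch of the parallel workflow described above (dataset paths
are hypothetical; assumes mpi4py is installed):

  import yt
  yt.enable_parallelism()  # without this, every MPI rank runs the same serial script

  # parallel_objects() distributes loop iterations (here, over datasets) among ranks
  for fn in yt.parallel_objects(["DD0001/DD0001", "DD0002/DD0002"]):
      ds = yt.load(fn)
      ad = ds.all_data()
      print ad.quantities.total_mass()

launched with something like: mpirun -n 12 python script.py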

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -49,7 +49,7 @@
  * Don't create a new class to replicate the functionality of an old class --
    replace the old class.  Too many options makes for a confusing user
    experience.
- * Parameter files are a last resort.
+ * Parameter files external to yt are a last resort.
  * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
@@ -61,7 +61,7 @@
    * Hard-coding parameter names that are the same as those in Enzo.  The
      following translation table should be of some help.  Note that the
      parameters are now properties on a Dataset subclass: you access them
-     like pf.refine_by .
+     like ds.refine_by .
      * RefineBy => refine_by
      * TopGridRank => dimensionality
      * TopGridDimensions => domain_dimensions

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/docstring_example.txt
--- a/doc/docstring_example.txt
+++ b/doc/docstring_example.txt
@@ -73,7 +73,7 @@
     Examples
     --------
     These are written in doctest format, and should illustrate how to
-    use the function.  Use the variables 'pf' for the parameter file, 'pc' for
+    use the function.  Use the variables 'ds' for the dataset, 'pc' for
     a plot collection, 'c' for a center, and 'L' for a vector. 
 
     >>> a=[1,2,3]

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/docstring_idioms.txt
--- a/doc/docstring_idioms.txt
+++ b/doc/docstring_idioms.txt
@@ -19,7 +19,7 @@
 useful variable names that correspond to specific instances that the user is
 presupposed to have created.
 
-   * `pf`: a parameter file, loaded successfully
+   * `ds`: a dataset, loaded successfully
    * `sp`: a sphere
    * `c`: a 3-component "center"
    * `L`: a 3-component vector that corresponds to either angular momentum or a

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/helper_scripts/parse_cb_list.py
--- a/doc/helper_scripts/parse_cb_list.py
+++ b/doc/helper_scripts/parse_cb_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/visualizing/_cb_docstrings.inc", "w")
 

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/helper_scripts/parse_dq_list.py
--- a/doc/helper_scripts/parse_dq_list.py
+++ b/doc/helper_scripts/parse_dq_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_dq_docstrings.inc", "w")
 
@@ -29,7 +29,7 @@
                             docstring = docstring))
                             #docstring = "\n".join(tw.wrap(docstring))))
 
-dd = pf.h.all_data()
+dd = ds.all_data()
 for n,func in sorted(dd.quantities.functions.items()):
     print n, func
     write_docstring(output, n, func[1])

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/helper_scripts/parse_object_list.py
--- a/doc/helper_scripts/parse_object_list.py
+++ b/doc/helper_scripts/parse_object_list.py
@@ -2,7 +2,7 @@
 import inspect
 from textwrap import TextWrapper
 
-pf = load("RD0005-mine/RedshiftOutput0005")
+ds = load("RD0005-mine/RedshiftOutput0005")
 
 output = open("source/analyzing/_obj_docstrings.inc", "w")
 
@@ -27,7 +27,7 @@
     f.write(template % dict(clsname = clsname, sig = sig, clsproxy=clsproxy,
                             docstring = 'physical-object-api'))
 
-for n,c in sorted(pf.h.__dict__.items()):
+for n,c in sorted(ds.__dict__.items()):
     if hasattr(c, '_con_args'):
         print n
         write_docstring(output, n, c)

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -17,15 +17,15 @@
 everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
 and so on.
 
-Try using the ``pf.field_list`` and ``pf.derived_field_list`` to view the
+Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
 to display the native fields in alphabetical order:
 
 .. notebook-cell::
 
   from yt.mods import *
-  pf = load("Enzo_64/DD0043/data0043")
-  for i in sorted(pf.field_list):
+  ds = load("Enzo_64/DD0043/data0043")
+  for i in sorted(ds.field_list):
     print i
 
 .. note:: Universal fields will be overridden by a code-specific field.

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -567,8 +567,10 @@
 
 mkdir -p ${DEST_DIR}/data
 cd ${DEST_DIR}/data
-echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
-get_ytdata xray_emissivity.h5
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  cloudy_emissivity.h5' > cloudy_emissivity.h5.sha512
+[ ! -e cloudy_emissivity.h5 ] && get_ytdata cloudy_emissivity.h5
+echo '0f714ae2eace0141b1381abf1160dc8f8a521335e886f99919caf3beb31df1fe271d67c7b2a804b1467949eb16b0ef87a3d53abad0e8160fccac1e90d8d9e85f  apec_emissivity.h5' > apec_emissivity.h5.sha512
+[ ! -e apec_emissivity.h5 ] && get_ytdata apec_emissivity.h5
 
 # Set paths to what they should be when yt is activated.
 export PATH=${DEST_DIR}/bin:$PATH
@@ -586,7 +588,7 @@
 FREETYPE_VER='freetype-2.4.12'
 H5PY='h5py-2.1.3'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.1.0'
+IPYTHON='ipython-2.1.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
 MATPLOTLIB='matplotlib-1.3.0'
@@ -608,14 +610,13 @@
 echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
 echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
 echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca  ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
+echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
 echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
@@ -624,7 +625,6 @@
 echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
 echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
 echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
 echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
@@ -657,7 +657,6 @@
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
-get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e $BZLIB/done ]
@@ -816,6 +815,7 @@
         YT_DIR=`dirname $ORIG_PWD`
     elif [ ! -e yt-hg ]
     then
+        echo "Cloning yt"
         YT_DIR="$PWD/yt-hg/"
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
         # Recently the hg server has had some issues with timeouts.  In lieu of
@@ -824,9 +824,9 @@
         ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
         # Now we update to the branch we're interested in.
         ( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-    elif [ -e yt-3.0-hg ] 
+    elif [ -e yt-hg ]
     then
-        YT_DIR="$PWD/yt-3.0-hg/"
+        YT_DIR="$PWD/yt-hg/"
     fi
     echo Setting YT_DIR=${YT_DIR}
 fi
@@ -943,14 +943,19 @@
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
-    if [ ! -e Rockstar/done ]
+    if [ ! -e rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
-        cd Rockstar
+        if [ ! -e rockstar ]
+        then
+            ( hg clone http://bitbucket.org/MatthewTurk/rockstar 2>&1 ) 1>> ${LOG_FILE}
+        fi
+        cd rockstar
+        ( hg pull 2>&1 ) 1>> ${LOG_FILE}
+        ( hg up -C tip 2>&1 ) 1>> ${LOG_FILE}
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
         cp librockstar.so ${DEST_DIR}/lib
-        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        ROCKSTAR_DIR=${DEST_DIR}/src/rockstar
         echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
         touch done
         cd ..

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/_static/agogo_yt.css
--- a/doc/source/_static/agogo_yt.css
+++ /dev/null
@@ -1,41 +0,0 @@
- at import url("agogo.css");
- at import url("http://fonts.googleapis.com/css?family=Crimson+Text");
- at import url("http://fonts.googleapis.com/css?family=Droid+Sans");
-
-div.document ul {
-  margin-left: 1.5em;
-  margin-top: 0.0em;
-  margin-bottom: 1.0em;
-}
-
-div.document li.toctree-l1 {
-  margin-bottom: 0.5em;
-}
-
-table.contentstable {
-  width: 100%;
-}
-
-table.contentstable td {
-  padding: 5px 15px 0px 15px;
-}
-
-table.contentstable tr {
-  border-bottom: 1px solid black;
-}
-
-a.biglink {
-  line-height: 1.2em;
-}
-
-a tt.xref {
-  font-weight: bolder;
-}
-
-table.docutils {
-  width: 100%;
-}
-
-table.docutils td {
-  width: 50%;
-}

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/_static/custom.css
--- /dev/null
+++ b/doc/source/_static/custom.css
@@ -0,0 +1,8 @@
+blockquote {
+    font-size: 16px;
+    border-left: none;
+}
+
+dd {
+    margin-left: 30px;
+}
\ No newline at end of file

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/_templates/layout.html
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -35,3 +35,5 @@
     </div>
 {%- endblock %}
 
+{# Custom CSS overrides #}
+{% set bootswatch_css_custom = ['_static/custom.css'] %}

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/_dq_docstrings.inc
--- a/doc/source/analyzing/_dq_docstrings.inc
+++ b/doc/source/analyzing/_dq_docstrings.inc
@@ -1,43 +1,20 @@
 
 
-.. function:: Action(action, combine_action, filter=None):
+.. function:: angular_momentum_vector()
 
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._Action`.)
-   This function evals the string given by the action arg and uses 
-   the function thrown with the combine_action to combine the values.  
-   A filter can be thrown to be evaled to short-circuit the calculation 
-   if some criterion is not met.
-   :param action: a string containing the desired action to be evaled.
-   :param combine_action: the function used to combine the answers when done lazily.
-   :param filter: a string to be evaled to serve as a data filter.
-
-
-
-.. function:: AngularMomentumVector():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._AngularMomentumVector`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.AngularMomentumVector`.)
    This function returns the mass-weighted average angular momentum vector.
 
 
+.. function:: bulk_velocity():
 
-.. function:: BaryonSpinParameter():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._BaryonSpinParameter`.)
-   This function returns the spin parameter for the baryons, but it uses
-   the particles in calculating enclosed mass.
-
-
-
-.. function:: BulkVelocity():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._BulkVelocity`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.BulkVelocity`.)
    This function returns the mass-weighted average velocity in the object.
 
 
+.. function:: center_of_mass(use_cells=True, use_particles=False):
 
-.. function:: CenterOfMass(use_cells=True, use_particles=False):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._CenterOfMass`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.CenterOfMass`.)
    This function returns the location of the center
   of mass. By default, this is computed using the *non-particle* data in the object. 
    
@@ -51,112 +28,64 @@
 
 
 
-.. function:: Extrema(fields, non_zero=False, filter=None):
+.. function:: extrema(fields, non_zero=False, filter=None):
 
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._Extrema`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.Extrema`.)
    This function returns the extrema of a set of fields
    
    :param fields: A field name, or a list of field names
    :param filter: a string to be evaled to serve as a data filter.
 
 
+.. function:: max_location(field):
 
-.. function:: IsBound(truncate=True, include_thermal_energy=False, treecode=True, opening_angle=1.0, periodic_test=False, include_particles=True):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._IsBound`.)
-   This returns whether or not the object is gravitationally bound. If this
-   returns a value greater than one, it is bound, and otherwise not.
-   
-   Parameters
-   ----------
-   truncate : Bool
-       Should the calculation stop once the ratio of
-       gravitational:kinetic is 1.0?
-   include_thermal_energy : Bool
-       Should we add the energy from ThermalEnergy
-       on to the kinetic energy to calculate 
-       binding energy?
-   treecode : Bool
-       Whether or not to use the treecode.
-   opening_angle : Float 
-       The maximal angle a remote node may subtend in order
-       for the treecode method of mass conglomeration may be
-       used to calculate the potential between masses.
-   periodic_test : Bool 
-       Used for testing the periodic adjustment machinery
-       of this derived quantity.
-   include_particles : Bool
-       Should we add the mass contribution of particles
-       to calculate binding energy?
-   
-   Examples
-   --------
-   >>> sp.quantities["IsBound"](truncate=False,
-   ... include_thermal_energy=True, treecode=False, opening_angle=2.0)
-   0.32493
-
-
-
-.. function:: MaxLocation(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._MaxLocation`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.max_location`.)
    This function returns the location of the maximum of a set
    of fields.
 
 
+.. function:: min_location(field):
 
-.. function:: MinLocation(field):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._MinLocation`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.MinLocation`.)
    This function returns the location of the minimum of a set
    of fields.
 
 
 
-.. function:: ParticleSpinParameter():
+.. function:: spin_parameter(use_gas=True, use_particles=True):
 
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._ParticleSpinParameter`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.SpinParameter`.)
    This function returns the spin parameter for the baryons, but it uses
    the particles in calculating enclosed mass.
 
 
+.. function:: total_mass():
 
-.. function:: StarAngularMomentumVector():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._StarAngularMomentumVector`.)
-   This function returns the mass-weighted average angular momentum vector 
-   for stars.
-
-
-
-.. function:: TotalMass():
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._TotalMass`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalMass`.)
    This function takes no arguments and returns the sum of cell masses and
    particle masses in the object.
 
 
+.. function:: total_quantity(fields):
 
-.. function:: TotalQuantity(fields):
-
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._TotalQuantity`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.TotalQuantity`.)
    This function sums up a given field over the entire region
    
    :param fields: The fields to sum up
 
 
 
-.. function:: WeightedAverageQuantity(field, weight):
+.. function:: weighted_average_quantity(field, weight):
 
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._WeightedAverageQuantity`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedAverageQuantity`.)
    This function returns an averaged quantity.
    
    :param field: The field to average
    :param weight: The field to weight by
 
-.. function:: WeightedVariance(field, weight):
+.. function:: weighted_variance(field, weight):
 
-   (This is a proxy for :func:`~yt.data_objects.derived_quantities._WeightedVariance`.)
+   (This is a proxy for :func:`~yt.data_objects.derived_quantities.WeightedVariance`.)
     This function returns the variance of a field.
 
     :param field: The target field
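
As a quick sketch of the renamed derived quantities above (dataset path
hypothetical):

  import yt

  ds = yt.load("RD0005-mine/RedshiftOutput0005")  # hypothetical dataset path
  sp = ds.sphere("c", (10.0, "Mpc"))

  # Derived quantities now use snake_case names and are called as methods:
  print sp.quantities.total_mass()
  print sp.quantities.angular_momentum_vector()
  print sp.quantities.extrema(("gas", "density"))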

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/_obj_docstrings.inc
--- a/doc/source/analyzing/_obj_docstrings.inc
+++ b/doc/source/analyzing/_obj_docstrings.inc
@@ -1,12 +1,12 @@
 
 
-.. class:: boolean(self, regions, fields=None, pf=None, **field_parameters):
+.. class:: boolean(self, regions, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRBooleanRegionBase`.)
 
 
-.. class:: covering_grid(self, level, left_edge, dims, fields=None, pf=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
+.. class:: covering_grid(self, level, left_edge, dims, fields=None, ds=None, num_ghost_zones=0, use_pbar=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCoveringGridBase`.)
@@ -24,13 +24,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCuttingPlaneBase`.)
 
 
-.. class:: disk(self, center, normal, radius, height, fields=None, pf=None, **field_parameters):
+.. class:: disk(self, center, normal, radius, height, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRCylinderBase`.)
 
 
-.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, pf=None, **field_parameters):
+.. class:: ellipsoid(self, center, A, B, C, e0, tilt, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMREllipsoidBase`.)
@@ -48,79 +48,79 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResCuttingPlaneBase`.)
 
 
-.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, pf=None, **field_parameters):
+.. class:: fixed_res_proj(self, axis, level, left_edge, dims, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRFixedResProjectionBase`.)
 
 
-.. class:: grid_collection(self, center, grid_list, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection(self, center, grid_list, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRGridCollectionBase`.)
 
 
-.. class:: grid_collection_max_level(self, center, max_level, fields=None, pf=None, **field_parameters):
+.. class:: grid_collection_max_level(self, center, max_level, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRMaxLevelCollectionBase`.)
 
 
-.. class:: inclined_box(self, origin, box_vectors, fields=None, pf=None, **field_parameters):
+.. class:: inclined_box(self, origin, box_vectors, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRInclinedBoxBase`.)
 
 
-.. class:: ortho_ray(self, axis, coords, fields=None, pf=None, **field_parameters):
+.. class:: ortho_ray(self, axis, coords, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMROrthoRayBase`.)
 
 
-.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
+.. class:: overlap_proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style='level', serialize=True, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRProjBase`.)
 
 
-.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionBase`.)
 
 
-.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: periodic_region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRPeriodicRegionStrictBase`.)
 
 
-.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, pf=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
+.. class:: proj(self, axis, field, weight_field=None, max_level=None, center=None, ds=None, source=None, node_name=None, field_cuts=None, preload_style=None, serialize=True, style='integrate', **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRQuadTreeProjBase`.)
 
 
-.. class:: ray(self, start_point, end_point, fields=None, pf=None, **field_parameters):
+.. class:: ray(self, start_point, end_point, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRayBase`.)
 
 
-.. class:: region(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionBase`.)
 
 
-.. class:: region_strict(self, center, left_edge, right_edge, fields=None, pf=None, **field_parameters):
+.. class:: region_strict(self, center, left_edge, right_edge, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRRegionStrictBase`.)
 
 
-.. class:: slice(self, axis, coord, fields=None, center=None, pf=None, node_name=False, **field_parameters):
+.. class:: slice(self, axis, coord, fields=None, center=None, ds=None, node_name=False, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSliceBase`.)
@@ -132,13 +132,13 @@
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSmoothedCoveringGridBase`.)
 
 
-.. class:: sphere(self, center, radius, fields=None, pf=None, **field_parameters):
+.. class:: sphere(self, center, radius, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRSphereBase`.)
 
 
-.. class:: streamline(self, positions, length=1.0, fields=None, pf=None, **field_parameters):
+.. class:: streamline(self, positions, length=1.0, fields=None, ds=None, **field_parameters):
 
    For more information, see :ref:`physical-object-api`
    (This is a proxy for :class:`~yt.data_objects.data_containers.AMRStreamlineBase`.)
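
In practice these containers are created from the dataset itself, so the
pf= to ds= keyword rename above rarely appears directly; a brief sketch
(dataset path hypothetical):

  import yt

  ds = yt.load("RD0005-mine/RedshiftOutput0005")  # hypothetical dataset path
  ray = ds.ray([0.1, 0.1, 0.1], [0.9, 0.9, 0.9])
  reg = ds.region([0.5, 0.5, 0.5], [0.25, 0.25, 0.25], [0.75, 0.75, 0.75])
  print reg["gas", "density"].size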

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ b/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:c423bcb9e3370a4581cbaaa8e764b95ec13e665aa3b46d452891d76cc79d7acf"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -34,7 +35,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "from yt.mods import *\n",
+      "import yt\n",
       "from yt.analysis_modules.halo_analysis.api import *\n",
       "import tempfile\n",
       "import shutil\n",
@@ -44,7 +45,7 @@
       "tmpdir = tempfile.mkdtemp()\n",
       "\n",
       "# Load the data set with the full simulation information\n",
-      "data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')"
+      "data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')"
      ],
      "language": "python",
      "metadata": {},
@@ -62,7 +63,7 @@
      "collapsed": false,
      "input": [
       "# Load the rockstar data files\n",
-      "halos_pf = load('rockstar_halos/halos_0.0.bin')"
+      "halos_ds = yt.load('rockstar_halos/halos_0.0.bin')"
      ],
      "language": "python",
      "metadata": {},
@@ -80,7 +81,7 @@
      "collapsed": false,
      "input": [
       "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf, \n",
+      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
       "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -295,9 +296,9 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "halos_pf =  load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
+      "halos_ds =  yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
       "\n",
-      "hc_reloaded = HaloCatalog(halos_pf=halos_pf,\n",
+      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
       "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
      ],
      "language": "python",
@@ -390,12 +391,13 @@
      "input": [
       "%matplotlib inline\n",
       "import matplotlib.pyplot as plt\n",
+      "import numpy as np\n",
       "\n",
-      "plt.plot(radius, temperature)\n",
+      "plt.plot(np.array(radius), np.array(temperature))\n",
       "\n",
       "plt.semilogy()\n",
-      "plt.xlabel('$\\mathrm{R/R_{vir}}$')\n",
-      "plt.ylabel('$\\mathrm{Temperature~[K]}$')\n",
+      "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n",
+      "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n",
       "\n",
       "plt.show()"
      ],
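
Condensed from the notebook changes above, the renamed HaloCatalog keywords
are used like this (a sketch, not a full recipe):

  import yt
  from yt.analysis_modules.halo_analysis.api import HaloCatalog

  data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
  halos_ds = yt.load('rockstar_halos/halos_0.0.bin')

  # data_pf/halos_pf have been renamed to data_ds/halos_ds
  hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
  hc.create()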

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/PPVCube.ipynb
--- a/doc/source/analyzing/analysis_modules/PPVCube.ipynb
+++ b/doc/source/analyzing/analysis_modules/PPVCube.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:3a720e0a18272564522f9fc23553431908d6f2b4f3e3e7dfe5b3e690e2e37677"
+  "signature": "sha256:56a8d72735e3cc428ff04b241d4b2ce6f653019818c6fc7a4148840d99030c85"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -16,6 +16,19 @@
      ]
     },
     {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "import yt\n",
+      "import numpy as np\n",
+      "\n",
+      "from yt.analysis_modules.ppv_cube.api import PPVCube"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
@@ -44,30 +57,40 @@
      ]
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "First, we'll set up the grid and the parameters of the profiles:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import PPVCube"
+      "nx,ny,nz = (256,256,256) # domain dimensions\n",
+      "R = 10. # outer radius of disk, kpc\n",
+      "r_0 = 3. # scale radius, kpc\n",
+      "beta = 1.4 # for the tangential velocity profile\n",
+      "alpha = -1. # for the radial density profile\n",
+      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates of x-y plane of disk\n",
+      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
+      "theta = np.arctan2(y, x) # polar coordinates"
      ],
      "language": "python",
      "metadata": {},
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Second, we'll construct the data arrays for the density and the velocity of the disk. Since we have the tangential velocity profile, we have to use the polar coordinates we derived earlier to compute `velx` and `vely`. Everywhere outside the disk, all fields are set to zero.  "
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "data = {}\n",
-      "nx,ny,nz = (256,256,256)\n",
-      "R = 10. # kpc\n",
-      "r_0 = 3. # kpc\n",
-      "beta = 1.4\n",
-      "alpha = -1.\n",
-      "x, y = np.mgrid[-R:R:nx*1j,-R:R:ny*1j] # cartesian coordinates\n",
-      "r = np.sqrt(x*x+y*y) # polar coordinates\n",
-      "theta = np.arctan2(y, x) # polar coordinates\n",
       "dens = np.zeros((nx,ny,nz))\n",
       "dens[:,:,nz/2-3:nz/2+3] = (r**alpha).reshape(nx,ny,1) # the density profile of the disk\n",
       "vel_theta = r/(1.+(r/r_0)**beta) # the azimuthal velocity profile of the disk\n",
@@ -75,12 +98,32 @@
       "vely = np.zeros((nx,ny,nz))\n",
       "velx[:,:,nz/2-3:nz/2+3] = (-vel_theta*np.sin(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
       "vely[:,:,nz/2-3:nz/2+3] = (vel_theta*np.cos(theta)).reshape(nx,ny,1) # convert polar to cartesian\n",
+      "dens[r > R] = 0.0\n",
+      "velx[r > R] = 0.0\n",
+      "vely[r > R] = 0.0"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Finally, we'll package these data arrays up into a dictionary, which will then be shipped off to `load_uniform_grid`. We'll define the width of the grid to be `2*R` kpc, which will be equal to 1  `code_length`. "
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "data = {}\n",
       "data[\"density\"] = (dens,\"g/cm**3\")\n",
       "data[\"velocity_x\"] = (velx, \"km/s\")\n",
       "data[\"velocity_y\"] = (vely, \"km/s\")\n",
       "data[\"velocity_z\"] = (np.zeros((nx,ny,nz)), \"km/s\") # zero velocity in the z-direction\n",
-      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])\n",
-      "ds = load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
+      "bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]]) # bbox of width 1 on a side with center (0,0,0)\n",
+      "ds = yt.load_uniform_grid(data, (nx,ny,nz), length_unit=(2*R,\"kpc\"), nprocs=1, bbox=bbox)"
      ],
      "language": "python",
      "metadata": {},
@@ -97,7 +140,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\",\"velocity_x\",\"velocity_y\",\"velocity_magnitude\"])"
      ],
      "language": "python",
      "metadata": {},
@@ -146,7 +189,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-0.5,0.5,\"km/s\"))"
+      "cube = PPVCube(ds, L, \"density\", dims=(200,100,50), velocity_bounds=(-1.5,1.5,\"km/s\"))"
      ],
      "language": "python",
      "metadata": {},
@@ -180,8 +223,18 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"cube.fits\")\n",
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=\"c\") # sliced at the center of the domain\n",
+      "ds = yt.load(\"cube.fits\")"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "# Specifying no center gives us the center slice\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"])\n",
       "slc.show()"
      ],
      "language": "python",
@@ -192,19 +245,11 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "# To figure out what the domain center and width is in pixel (code length) units:\n",
-      "print ds.domain_center\n",
-      "print ds.domain_width"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,-250.0]) # \"z\" slice is in m/s\n",
+      "import yt.units as u\n",
+      "# Picking different velocities for the slices\n",
+      "new_center = ds.domain_center\n",
+      "new_center[2] = ds.spec2pixel(-1.0*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -215,7 +260,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "slc = SlicePlot(ds, \"z\", [\"density\"], center=[100.5,50.5,300.0])\n",
+      "new_center[2] = ds.spec2pixel(0.7*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
       "slc.show()"
      ],
      "language": "python",
@@ -225,7 +271,31 @@
     {
      "cell_type": "code",
      "collapsed": false,
-     "input": [],
+     "input": [
+      "new_center[2] = ds.spec2pixel(-0.3*u.km/u.s)\n",
+      "slc = yt.SlicePlot(ds, \"z\", [\"density\"], center=new_center)\n",
+      "slc.show()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "If we project all the emission at all the different velocities along the z-axis, we recover the entire disk:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "prj = yt.ProjectionPlot(ds, \"z\", [\"density\"], proj_style=\"sum\")\n",
+      "prj.set_log(\"density\", True)\n",
+      "prj.set_zlim(\"density\", 1.0e-3, 0.2)\n",
+      "prj.show()"
+     ],
      "language": "python",
      "metadata": {},
      "outputs": []

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e4b5ea69687eb79452c16385b3a6f795b4572518dfa7f9d8a8125bd75b5fea85"
+  "signature": "sha256:5ab80c6b33a115cb88c36fde8659434d14a852dd43b0b419f2bb0c04acf66278"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,7 +20,7 @@
      "collapsed": false,
      "input": [
       "%matplotlib inline\n",
-      "from yt.mods import *\n",
+      "import yt\n",
       "import glob\n",
       "from yt.analysis_modules.particle_trajectories.api import ParticleTrajectories\n",
       "from yt.config import ytcfg\n",
@@ -77,7 +77,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(my_fns[0])\n",
+      "ds = yt.load(my_fns[0])\n",
       "dd = ds.all_data()\n",
       "indices = dd[\"particle_index\"].astype(\"int\")\n",
       "print indices"
@@ -205,8 +205,8 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "ds = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
-      "slc = SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "slc = yt.SlicePlot(ds, \"x\", [\"density\",\"dark_matter_density\"], center=\"max\", width=(3.0, \"Mpc\"))\n",
       "slc.show()"
      ],
      "language": "python",

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e5d3c629592c8aacbabf2e3fab2660703298886b8de6f36eb7cdc1f60b726496"
+  "signature": "sha256:e4db171b795d155870280ddbe8986f55f9a94ffb10783abf9d4cc2de3ec24894"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -18,7 +18,7 @@
       "projection of the pressure field of a cluster. However, the *full* S-Z signal is a combination of thermal and kinetic\n",
       "contributions, and for large frequencies and high temperatures\n",
       "relativistic effects are important. For computing the full S-Z signal\n",
-      "incorporating all of these effects, Jens Chluba has written a library:\n",
+      "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
       "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
@@ -89,14 +89,13 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "%matplotlib inline\n",
-      "from yt.mods import *\n",
-      "from yt.analysis_modules.api import SZProjection\n",
+      "import yt\n",
+      "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
-      "pf = load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
+      "ds = yt.load(\"enzo_tiny_cosmology/DD0046/DD0046\")\n",
       "\n",
       "freqs = [90.,180.,240.]\n",
-      "szprj = SZProjection(pf, freqs)"
+      "szprj = SZProjection(ds, freqs)"
      ],
      "language": "python",
      "metadata": {},
@@ -108,8 +107,8 @@
      "source": [
       "`freqs` is a list or array of frequencies in GHz at which the signal\n",
       "is to be computed. The `SZProjection` constructor also accepts the\n",
-      "optional keywords, **mue** (mean molecular weight for computing the\n",
-      "electron number density, 1.143 is the default) and **high_order** (set\n",
+      "optional keywords, `mue` (mean molecular weight for computing the\n",
+      "electron number density, 1.143 is the default) and `high_order` (set\n",
       "to True to compute terms in the S-Z signal expansion up to\n",
       "second-order in $T_{e,SZ}$ and $\\beta$). "
      ]
@@ -127,7 +126,7 @@
      "collapsed": false,
      "input": [
       "# An on-axis projection along the z-axis with width 10 Mpc, centered on the gas density maximum\n",
-      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"mpc\"), nx=400)"
+      "szprj.on_axis(\"z\", center=\"max\", width=(10.0, \"Mpc\"), nx=400)"
      ],
      "language": "python",
      "metadata": {},
@@ -144,7 +143,7 @@
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
       "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
-      "keywords for projections such as **center**, **width**, and **source**. The image buffer size can be controlled by setting **nx**.  \n"
+      "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },
     {
@@ -216,7 +215,7 @@
      "source": [
       "which would write all of the projections to a single FITS file,\n",
       "including coordinate information in kpc. The optional keyword\n",
-      "**clobber** allows a previous file to be overwritten. \n"
+      "`clobber` allows a previous file to be overwritten. \n"
      ]
     }
    ],
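
Summarizing the updated notebook, a minimal SZProjection run might look like
the following (the write_fits file name is illustrative):

  import yt
  from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection

  ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
  szprj = SZProjection(ds, [90., 180., 240.])  # frequencies in GHz

  # On-axis projection along z, 10 Mpc wide, centered on the density maximum
  szprj.on_axis("z", center="max", width=(10.0, "Mpc"), nx=400)
  szprj.write_fits("SZ_projection.fits", clobber=True)  # clobber overwrites existing files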

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -35,7 +35,7 @@
 
 .. code-block:: python
 
-  from yt.analysis_modules.api import AbsorptionSpectrum
+  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum
 
   sp = AbsorptionSpectrum(900.0, 1800.0, 10000)
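
Beyond the corrected import, typical usage adds lines and then builds the
spectrum from a saved light ray; a sketch with illustrative parameter values
and file names:

  from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

  # 900-1800 Angstrom spectral range with 10000 bins
  sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

  # H I Lyman-alpha: wavelength [Angstrom], oscillator strength,
  # gamma [1/s], atomic mass [amu]
  sp.add_line("HI Lya", "H_number_density", 1215.67, 4.164e-01, 6.265e+08, 1.00794)

  # Build the spectrum from a previously saved light ray (file name illustrative)
  wavelength, flux = sp.make_spectrum("lightray.h5", output_file="spectrum.h5")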
 

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -2,185 +2,135 @@
 
 Clump Finding
 =============
-.. sectionauthor:: Britton Smith <britton.smith at colorado.edu>
 
-``yt`` has the ability to identify topologically disconnected structures based in a dataset using 
-any field available.  This is powered by a contouring algorithm that runs in a recursive 
-fashion.  The user specifies the initial data object in which the clump-finding will occur, 
-the field over which the contouring will be done, the upper and lower limits of the 
-initial contour, and the contour increment.
+The clump finder uses a contouring algorithm to identify topologically 
+disconnected structures within a dataset.  This works by first creating a 
+single contour over the full range of the contouring field, then continually 
+increasing the lower value of the contour until it reaches the maximum value 
+of the field.  As disconnected structures are identified as separate contours, 
+the routine continues recursively through each object, creating a hierarchy of 
+clumps.  Individual clumps can be kept or removed from the hierarchy based on 
+the result of user-specified functions, such as checking for gravitational 
+boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder begins by creating a single contour of the specified field over the entire 
-range given.  For every isolated contour identified in the initial iteration, contouring is 
-repeated with the same upper limit as before, but with the lower limit increased by the 
-specified increment.  This repeated for every isolated group until the lower limit is equal 
-to the upper limit.
+The clump finder requires a data container and a field over which the 
+contouring is to be performed.
 
-Often very tiny clumps can appear as groups of only a few cells that happen to be slightly 
-overdense (if contouring over density) with respect to the surrounding gas.  The user may 
-specify criteria that clumps must meet in order to be kept.  The most obvious example is 
-selecting only those clumps that are gravitationally bound.
+.. code:: python
 
-Once the clump-finder has finished, the user can write out a set of quantities for each clump in the 
-index.  Additional info items can also be added.  We also provide a recipe
-for finding clumps in :ref:`cookbook-find_clumps`.
+   import yt
+   from yt.analysis_modules.level_sets.api import *
 
-Treecode Optimization
----------------------
+   ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
 
-.. sectionauthor:: Stephen Skory <s at skory.us>
-.. versionadded:: 2.1
+   data_source = ds.disk([0.5, 0.5, 0.5], [0., 0., 1.],
+                         (8, 'kpc'), (1, 'kpc'))
 
-As mentioned above, the user has the option to limit clumps to those that are
-gravitationally bound.
-The correct and accurate way to calculate if a clump is gravitationally
-bound is to do the full double sum:
+   master_clump = Clump(data_source, ("gas", "density"))
 
-.. math::
+At this point, every isolated contour will be considered a clump, 
+whether this is physical or not.  Validator functions can be added to 
+determine if an individual contour should be considered a real clump.  
+These functions are specified with the ``Clump.add_validator`` function.  
+Currently, two validators exist: a minimum number of cells and gravitational 
+boundedness.
 
-  PE = \Sigma_{i=1}^N \Sigma_{j=i}^N \frac{G M_i M_j}{r_{ij}}
+.. code:: python
 
-where :math:`PE` is the gravitational potential energy of :math:`N` cells,
-:math:`G` is the
-gravitational constant, :math:`M_i` is the mass of cell :math:`i`, 
-and :math:`r_{ij}` is the distance
-between cell :math:`i` and :math:`j`.
-The number of calculations required for this calculation
-grows with the square of :math:`N`. Therefore, for large clumps with many cells, the
-test for boundedness can take a significant amount of time.
+   master_clump.add_validator("min_cells", 20)
 
-An effective way to greatly speed up this calculation with minimal error
-is to use the treecode approximation pioneered by
-`Barnes and Hut (1986) <http://adsabs.harvard.edu/abs/1986Natur.324..446B>`_.
-This method of calculating gravitational potentials works by
-grouping individual masses that are located close together into a larger conglomerated
-mass with a geometric size equal to the distribution of the individual masses.
-For a mass cell that is sufficiently distant from the conglomerated mass,
-the gravitational calculation can be made using the conglomerate, rather than
-each individual mass, which saves time.
+   master_clump.add_validator("gravitationally_bound", use_particles=False)
 
-The decision whether or not to use a conglomerate depends on the accuracy control
-parameter ``opening_angle``. Using the small-angle approximation, a conglomerate
-may be used if its geometric size subtends an angle no greater than the
-``opening_angle`` upon the remote mass. The default value is
-``opening_angle = 1``, which gives errors well under 1%. A value of 
-``opening_angle = 0`` is identical to the full O(N^2) method, and larger values
-will speed up the calculation and sacrifice accuracy (see the figures below).
+As many validators as desired can be added, and a clump is only kept if all 
+return True.  If not, a clump is remerged into its parent.  Custom validators 
+can easily be added.  A validator function need only accept a ``Clump`` object 
+and return True or False.
 
-The treecode method is iterative. Conglomerates may themselves form larger
-conglomerates. And if a larger conglomerate does not meet the ``opening_angle``
-criterion, the smaller conglomerates are tested as well. This iteration of 
-conglomerates will
-cease once the level of the original masses is reached (this is what happens
-for all pair calculations if ``opening_angle = 0``).
+.. code:: python
 
-Below are some examples of how to control the usage of the treecode.
+   def _minimum_gas_mass(clump, min_mass):
+       return (clump["gas", "cell_mass"].sum() >= min_mass)
+   add_validator("minimum_gas_mass", _minimum_gas_mass)
 
-This example will calculate the ratio of the potential energy to kinetic energy
-for a spherical clump using the treecode method with an opening angle of 2.
-The default opening angle is 1.0:
+The ``add_validator`` function adds the validator to a registry that can 
+be accessed by the clump finder.  Then, the validator can be added to the 
+clump finding just like the others.
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
-      treecode=True, opening_angle=2.0)
+.. code:: python
 
-This example will accomplish the same as the above, but will use the full
-N^2 method.
+   master_clump.add_validator("minimum_gas_mass", ds.quan(1.0, "Msun"))
 
-.. code-block:: python
-  
-  from yt.mods import *
-  
-  pf = load("DD0000")
-  sp = pf.sphere([0.5, 0.5, 0.5], radius=0.1)
-  
-  ratio = sp.quantities["IsBound"](truncate=False, include_thermal_energy=True,
-      treecode=False)
+The clump finding algorithm accepts the ``Clump`` object, the initial minimum 
+and maximum of the contouring field, and the step size.  The lower contour 
+value will be continually multiplied by the step size.
 
-Here the treecode method is specified for clump finding (this is default).
-Please see the link above for the full example of how to find clumps (the
-trailing backslash is important!):
+.. code:: python
 
-.. code-block:: python
-  
-  function_name = 'self.data.quantities["IsBound"](truncate=True, \
-      include_thermal_energy=True, treecode=True, opening_angle=2.0) > 1.0'
-  master_clump = amods.level_sets.Clump(data_source, None, field,
-      function=function_name)
+   c_min = data_source["gas", "density"].min()
+   c_max = data_source["gas", "density"].max()
+   step = 2.0
+   find_clumps(master_clump, c_min, c_max, step)
 
-To turn off the treecode, of course one should turn treecode=False in the
-example above.
+After the clump finding has finished, the master clump will represent the top 
+of a hierarchy of clumps.  The ``children`` attribute within a ``Clump`` object 
+contains a list of all sub-clumps.  Each sub-clump is also a ``Clump`` object 
+with its own ``children`` attribute, and so on.
 
-Treecode Speedup and Accuracy Figures
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+A number of helper routines exist for examining the clump hierarchy.
 
-Two datasets are used to make the three figures below. Each is a zoom-in
-simulation with high resolution in the middle with AMR, and then lower
-resolution static grids on the periphery. In this way they are very similar to
-a clump in a full-AMR simulation, where there are many AMR levels stacked
-around a density peak. One dataset has a total of 3 levels of AMR, and
-the other has 10 levels, but in other ways are very similar.
+.. code:: python
 
-The first figure shows the effect of varying the opening angle on the speed
-and accuracy of the treecode. The tests were performed using the L=10 
-dataset on a clump with approximately 118,000 cells. The speedup of up the
-treecode is in green, and the accuracy in blue, with the opening angle
-on the x-axis.
+   # Write a text file of the full hierarchy.
+   write_clump_index(master_clump, 0, "%s_clump_hierarchy.txt" % ds)
 
-With an ``opening_angle`` = 0, the accuracy is perfect, but the treecode is
-less than half as fast as the brute-force method. However, by an
-``opening_angle`` of 1, the treecode is now nearly twice as fast, with
-about 0.2% error. This trend continues to an ``opening_angle`` 8, where
-large opening angles have no effect due to geometry.
+   # Write a text file of only the leaf nodes.
+   write_clumps(master_clump, 0, "%s_clumps.txt" % ds)
 
-.. image:: _images/TreecodeOpeningAngleBig.png
-   :width: 450
-   :height: 400
+   # Get a list of just the leaf nodes.
+   leaf_clumps = get_lowest_clumps(master_clump)
 
-Note that the accuracy is always below 1. The treecode will always underestimate
-the gravitational binding energy of a clump.
+``Clump`` objects can be used like all other data containers.
 
-In this next figure, the ``opening_angle`` is kept constant at 1, but the
-number of cells is varied on the L=3 dataset by slowly expanding a spherical
-region of analysis. Up to about 100,000 cells,
-the treecode is actually slower than the brute-force method. This is due to
-the fact that with fewer cells, smaller geometric distances,
-and a shallow AMR index, the treecode
-method has very little chance to be applied. The calculation is overall
-slower due to the overhead of the treecode method & startup costs. This
-explanation is further strengthened by the fact that the accuracy of the
-treecode method stay perfect for the first couple thousand cells, indicating
-that the treecode method is not being applied over that range.
+.. code:: python
 
-Once the number of cells gets high enough, and the size of the region becomes
-large enough, the treecode method can work its magic and the treecode method
-becomes advantageous.
+   print leaf_clumps[0]["gas", "density"]
+   print leaf_clumps[0].quantities.total_mass()
 
-.. image:: _images/TreecodeCellsSmall.png
-   :width: 450
-   :height: 400
+The writing functions will write out a series of properties about each 
+clump by default.  Additional properties can be appended with the 
+``Clump.add_info_item`` function.
 
-The saving grace to the figure above is that for small clumps, a difference of
-50% in calculation time is on the order of a second or less, which is tiny
-compared to the minutes saved for the larger clumps where the speedup can
-be greater than 3.
+.. code:: python
 
-The final figure is identical to the one above, but for the L=10 dataset.
-Due to the higher number of AMR levels, which translates into more opportunities
-for the treecode method to be applied, the treecode becomes faster than the
-brute-force method at only about 30,000 cells. The accuracy shows a different
-behavior, with a dip and a rise, and overall lower accuracy. However, at all
-times the error is still well under 1%, and the time savings are significant.
+   master_clump.add_info_item("total_cells")
 
-.. image:: _images/TreecodeCellsBig.png
-   :width: 450
-   :height: 400
+Just like the validators, custom info items can be added by defining functions 
+that minimally accept a ``Clump`` object and return a string to be printed.
 
-The figures above show that the treecode method is generally very advantageous,
-and that the error introduced is minimal.
+.. code:: python
+
+   def _mass_weighted_jeans_mass(clump):
+       jeans_mass = clump.data.quantities.weighted_average_quantity(
+           "jeans_mass", ("gas", "cell_mass")).in_units("Msun")
+       return "Jeans Mass (mass-weighted): %.6e Msolar." % jeans_mass
+   add_clump_info("mass_weighted_jeans_mass", _mass_weighted_jeans_mass)
+
+Then, add it to the list:
+
+.. code:: python
+
+   master_clump.add_info_item("mass_weighted_jeans_mass")
+
+By default, the following info items are activated: **total_cells**, 
+**cell_mass**, **mass_weighted_jeans_mass**, **volume_weighted_jeans_mass**, 
+**max_grid_level**, **min_number_density**, **max_number_density**, and 
+**distance_to_main_clump**.
+
+Clumps can be visualized using the ``annotate_clumps`` callback.
+
+.. code:: python
+
+   prj = yt.ProjectionPlot(ds, 2, ("gas", "density"), 
+                           center='c', width=(20,'kpc'))
+   prj.annotate_clumps(leaf_clumps)
+   prj.save('clumps')
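
A minimal sketch of walking the hierarchy described above, recursing through
the ``children`` attribute; it assumes ``master_clump`` was built with
``find_clumps`` as in the docs, and that leaf clumps may have no children.

.. code-block:: python

   # Count every clump in the hierarchy, starting from the top.
   def count_clumps(clump):
       n = 1
       if clump.children is not None:
           for child in clump.children:
               n += count_clumps(child)
       return n

   print count_clumps(master_clump)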

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -58,8 +58,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  halo_list = parallelHF(pf)
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
 Ellipsoid Parameters
@@ -69,8 +69,8 @@
   from yt.mods import *
   from yt.analysis_modules.halo_finding.api import *
 
-  pf=load('Enzo_64/RD0006/RedshiftOutput0006')
-  haloes = LoadHaloes(pf, 'MyHaloList')
+  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
 object "haloes", you can get loop over the list of haloes and do
@@ -107,7 +107,7 @@
 
 .. code-block:: python
 
-  ell = pf.ellipsoid(ell_param[0],
+  ell = ds.ellipsoid(ell_param[0],
   ell_param[1],
   ell_param[2],
   ell_param[3],

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -8,6 +8,8 @@
    :maxdepth: 1
 
    halo_catalogs
+   halo_transition
    halo_finding
    halo_mass_function
+   halo_merger_tree
    halo_analysis_example

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
@@ -1,3 +1,5 @@
+.. _halo-analysis-example:
+
 Using HaloCatalogs to do Analysis
 ---------------------------------
 

diff -r d163d5f4389c646c397b8e5156ce07f81b9f8f04 -r a85d0f72d678234256292e5e93eab9a3b7837214 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -7,9 +7,11 @@
 together into a single framework. This framework is substantially
 different from the limited framework included in yt-2.x and is only 
 backwards compatible in that output from old halo finders may be loaded.
+For a direct translation of various halo analysis tasks using yt-2.x
+to yt-3.0 please see :ref:`halo-transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
-catalog through data_pf. These halos can be found using friends-of-friends,
+catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
 use. The available arguments are 'fof', 'hop', and 'rockstar'. For more
 details on the relative differences between these halo finders see 
@@ -19,32 +21,32 @@
 
    from yt.mods import *
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, finder_method='hop')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
 This method is not implemented for previously run friends-of-friends or 
 HOP finders. Even though rockstar creates one file per processor, 
 specifying any one file allows the full catalog to be loaded. Here we 
 only specify the file output by the processor with ID 0. Note that the 
-argument for supplying a rockstar output is `halos_pf`, not `data_pf`.
+argument for supplying a rockstar output is `halos_ds`, not `data_ds`.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   hc = HaloCatalog(halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
 is sufficient for creating a halo catalog, it is not possible to find 
 any new information about the identified halos. To associate the halos 
 with the dataset from which they were found, supply arguments to both 
-halos_pf and data_pf.
+halos_ds and data_ds.
 
 .. code-block:: python
 
-   halos_pf = load(path+'rockstar_halos/halos_0.0.bin')
-   data_pf = load('Enzo_64/RD0006/RedshiftOutput0006')
-   hc = HaloCatalog(data_pf=data_pf, halos_pf=halos_pf)
+   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
 A data container can also be supplied via keyword data_source, 
 associated with either dataset, to control the spatial region in 
@@ -215,8 +217,8 @@
 
 .. code-block:: python
 
-   hpf = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
-   hc = HaloCatalog(halos_pf=hpf,
+   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")
@@ -226,4 +228,4 @@
 =======
 
 For a full example of how to use these methods together see 
-:ref:`halo_analysis_example`.
+:doc:`halo_analysis_example`.

This diff is so big that we needed to truncate the remainder.
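
A short sketch tying together the catalog-creation paths shown in the hunks
above, using the same sample data as the docs; ``hc.create()`` is assumed to
run the configured analysis and build the catalog.

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_analysis.api import HaloCatalog

   # Associate already-found rockstar halos with the dataset they came from.
   halos_ds = load('rockstar_halos/halos_0.0.bin')
   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
   hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
   hc.create()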

https://bitbucket.org/yt_analysis/yt/commits/cb83d1e334cc/
Changeset:   cb83d1e334cc
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 16:48:31
Summary:     Removing merger remnant.
Affected #:  1 file

diff -r a85d0f72d678234256292e5e93eab9a3b7837214 -r cb83d1e334cc60baff03097220454e0f5f0c1cb9 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -14,7 +14,6 @@
 General Overview
 ----------------
 
-<<<<<<< local
 A halo mass function can be created for the halos identified in a cosmological 
 simulation, as well as analytic fits using any arbitrary set of cosmological
 parameters. In order to create a mass function for simulated halos, they must


https://bitbucket.org/yt_analysis/yt/commits/1c3db6e9095f/
Changeset:   1c3db6e9095f
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 16:51:15
Summary:     pf -> ds.
Affected #:  1 file

diff -r cb83d1e334cc60baff03097220454e0f5f0c1cb9 -r 1c3db6e9095febf3aef0f08e2da8888557a8b222 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -72,8 +72,8 @@
 
 hds0 = "rockstar_halos/halos_0.0.bin"
 hds1 = "rockstar_halos/halos_0.1.bin"
-@requires_pf(hds0)
-@requires_pf(hds1)
+@requires_ds(hds0)
+@requires_ds(hds1)
 def test_halo_mass_function():
     hds = data_dir_load(hds0)
     yield assert_equal, str(hds), "halos_0.0.bin"


https://bitbucket.org/yt_analysis/yt/commits/d3338a6bd565/
Changeset:   d3338a6bd565
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 16:53:17
Summary:     Clearing up some merger conflicts.
Affected #:  1 file

diff -r 1c3db6e9095febf3aef0f08e2da8888557a8b222 -r d3338a6bd565c29c9acb91db3bb047676f5b26e7 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -751,22 +751,8 @@
                         ds_fn, axis, field, weight_field,
                         dobj_name)
 
-def big_patch_amr(pf_fn, fields):
-    if not can_run_pf(pf_fn): return
-    dso = [ None, ("sphere", ("max", (0.1, 'unitary')))]
-    yield GridHierarchyTest(pf_fn)
-    yield ParentageRelationshipsTest(pf_fn)
-    for field in fields:
-        yield GridValuesTest(pf_fn, field)
-        for axis in [0, 1, 2]:
-            for ds in dso:
-                for weight_field in [None, "density"]:
-                    yield PixelizedProjectionValuesTest(
-                        pf_fn, axis, field, weight_field,
-                        ds)
-
 def hmf_sim_and_analytic(halos_ds):
-    if not can_run_pf(halos_ds): return
+    if not can_run_ds(halos_ds): return
     yield HaloMassFunctionTest(halos_ds)
 
 def create_obj(ds, obj_type):


https://bitbucket.org/yt_analysis/yt/commits/9b036eca6914/
Changeset:   9b036eca6914
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 17:26:53
Summary:     Updating field names
Affected #:  1 file

diff -r d3338a6bd565c29c9acb91db3bb047676f5b26e7 -r 9b036eca69144493ccda86387f91ad09baba65fb yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -255,9 +255,9 @@
     def set_mass_from_halos(self, which_limit):
         data_source = self.halos_ds.all_data()
         if which_limit is "min_mass":
-            self.log_mass_min = int(np.log10(np.amin(data_source['ParticleMassMsun'])))
+            self.log_mass_min = int(np.log10(np.amin(data_source["halos", "particle_mass"].in_units("Msun"))))
         if which_limit is "max_mass":
-            self.log_mass_max = int(np.log10(np.amax(data_source['ParticleMassMsun'])))+1
+            self.log_mass_max = int(np.log10(np.amax(data_source["halos", "particle_mass"].in_units("Msun"))))+1
     
     """
     Here's where we create the halo mass functions from simulated halos
@@ -265,7 +265,7 @@
     def create_sim_hmf(self):
         data_source = self.halos_ds.all_data()
         # We're going to use indices to count the number of halos above a given mass
-        masses_sim = np.sort(data_source['ParticleMassMsun'])
+        masses_sim = np.sort(data_source["halos", "particle_mass"].in_units("Msun"))
         # Determine the size of the simulation volume in comoving Mpc**3
         sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)
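
The renamed fields can be queried through the usual data container interface;
a sketch of the tuple-style access used above, on a loaded halo dataset.

.. code-block:: python

   from yt.mods import *

   halos_ds = load("rockstar_halos/halos_0.0.bin")
   ad = halos_ds.all_data()
   # yt-3.0 field names are (field type, field name) tuples, and the
   # returned arrays carry units that can be converted in place.
   masses = ad["halos", "particle_mass"].in_units("Msun")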


https://bitbucket.org/yt_analysis/yt/commits/6ddc2d811775/
Changeset:   6ddc2d811775
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 17:38:53
Summary:     Removing inheritance of ParallelAnalysisInterface.
Affected #:  1 file

diff -r 9b036eca69144493ccda86387f91ad09baba65fb -r 6ddc2d811775b3b2ea9861343d196973757a6494 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -16,16 +16,14 @@
 import numpy as np
 import math, time
 
-from yt.funcs import *
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelDummy, \
-    ParallelAnalysisInterface, \
-    parallel_blocking_call
+from yt.funcs import mylog
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.physical_ratios import \
     rho_crit_g_cm3_h2
-from yt.utilities.logger import ytLogger as mylog
 
-class HaloMassFcn(ParallelAnalysisInterface):
+class HaloMassFcn():
     r"""
     Initialize a HaloMassFcn object to analyze the distribution of halos as 
     a function of mass.  A mass function can be created for a set of 
@@ -158,7 +156,6 @@
     omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, 
     sigma8=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
     log_mass_max=None, num_sigma_bins=360, fitting_function=4):
-        ParallelAnalysisInterface.__init__(self)
         self.simulation_ds = simulation_ds
         self.halos_ds = halos_ds
         self.omega_matter0 = omega_matter0
@@ -285,7 +282,7 @@
         if analytic:
             if self.make_analytic:
                 fitname = prefix + '-analytic.dat'
-                fp = self.comm.write_on_root(fitname)
+                fp = open(fitname, "w")
                 line = \
                 """#Columns:
 #1. mass (M_solar)
@@ -308,7 +305,7 @@
         if simulated:
             if self.make_simulated:
                 haloname = prefix + '-simulated.dat'
-                fp = self.comm.write_on_root(haloname)
+                fp = open(haloname, "w")
                 line = \
                 """#Columns:
 #1. mass (M_solar)


https://bitbucket.org/yt_analysis/yt/commits/5947047520a9/
Changeset:   5947047520a9
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 17:49:12
Summary:     A little cleanup.
Affected #:  1 file

diff -r 6ddc2d811775b3b2ea9861343d196973757a6494 -r 5947047520a949604f78791a46ca8e6cf23e23c4 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -1,5 +1,5 @@
 """
-halo_mass_function - Halo Mass Function and supporting functions.
+Halo Mass Function and supporting functions.
 
 
 
@@ -252,9 +252,11 @@
     def set_mass_from_halos(self, which_limit):
         data_source = self.halos_ds.all_data()
         if which_limit is "min_mass":
-            self.log_mass_min = int(np.log10(np.amin(data_source["halos", "particle_mass"].in_units("Msun"))))
+            self.log_mass_min = \
+              int(np.log10(np.amin(data_source["halos", "particle_mass"].in_units("Msun"))))
         if which_limit is "max_mass":
-            self.log_mass_max = int(np.log10(np.amax(data_source["halos", "particle_mass"].in_units("Msun"))))+1
+            self.log_mass_max = \
+              int(np.log10(np.amax(data_source["halos", "particle_mass"].in_units("Msun"))))+1
     
     """
     Here's where we create the halo mass functions from simulated halos
@@ -284,11 +286,10 @@
                 fitname = prefix + '-analytic.dat'
                 fp = open(fitname, "w")
                 line = \
-                """#Columns:
-#1. mass (M_solar)
-#2. cumulative number density of halos (comoving (Mpc/h)^3)
-#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-"""
+                "#Columns:\n" + \
+                "#1. mass (M_solar)\n" + \
+                "#2. cumulative number density of halos (comoving (Mpc/h)^3)\n" + \
+                "#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)\n"
                 fp.write(line)
                 for i in xrange(self.masses_analytic.size - 1):
                     line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
@@ -298,19 +299,19 @@
                 fp.close()
             # If the analytic halo mass function wasn't created, warn the user
             else:
-                mylog.warning("The analytic halo mass function was not created and cannot be written \
-out! Specify its creation with HaloMassFcn(make_analytic=True, other_args) \
-when creating the HaloMassFcn object.")
+                mylog.warning("The analytic halo mass function was not created and cannot be " +
+                              "written out! Specify its creation with " +
+                              "HaloMassFcn(make_analytic=True, other_args) when creating the " +
+                              "HaloMassFcn object.")
         # Write out the simulated mass function if it exists and was requested
         if simulated:
             if self.make_simulated:
                 haloname = prefix + '-simulated.dat'
                 fp = open(haloname, "w")
                 line = \
-                """#Columns:
-#1. mass (M_solar)
-#2. cumulative number density of halos (comoving (Mpc/h)^3)
-"""
+                "#Columns:\n" + \
+                "#1. mass (M_solar)\n" + \
+                "#2. cumulative number density of halos (comoving (Mpc/h)^3)\n"
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
                     line = "%e\t%e\n" % (self.masses_sim[i], 
@@ -319,10 +320,10 @@
                 fp.close()
             # If the simulated halo mass function wasn't created, warn the user
             else:
-                mylog.warning("The simulated halo mass function was not created and cannot be written \
-out! Specify its creation by providing a loaded halo dataset with \
-HaloMassFcn(ds_halos=loaded_halo_dataset, other_args) when creating \
-the HaloMassFcn object.")
+                mylog.warning("The simulated halo mass function was not created and cannot " +
+                              "be written out! Specify its creation by providing a loaded " +
+                              "halo dataset with HaloMassFcn(ds_halos=loaded_halo_dataset, " +
+                              "other_args) when creating the HaloMassFcn object.")
 
     def sigmaM(self):
         """


https://bitbucket.org/yt_analysis/yt/commits/bf6dff791aba/
Changeset:   bf6dff791aba
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 18:12:28
Summary:     Correcting comment on units in output files.
Affected #:  1 file

diff -r 5947047520a949604f78791a46ca8e6cf23e23c4 -r bf6dff791aba8ce77f8f2ffafd2dfc3bb3837b8f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -288,8 +288,8 @@
                 line = \
                 "#Columns:\n" + \
                 "#1. mass (M_solar)\n" + \
-                "#2. cumulative number density of halos (comoving (Mpc/h)^3)\n" + \
-                "#3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)\n"
+                "#2. cumulative number density of halos [comoving Mpc^-3]\n" + \
+                "#3. (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]\n"
                 fp.write(line)
                 for i in xrange(self.masses_analytic.size - 1):
                     line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
@@ -310,8 +310,8 @@
                 fp = open(haloname, "w")
                 line = \
                 "#Columns:\n" + \
-                "#1. mass (M_solar)\n" + \
-                "#2. cumulative number density of halos (comoving (Mpc/h)^3)\n"
+                "#1. mass [Msun]\n" + \
+                "#2. cumulative number density of halos [comoving Mpc^-3]\n"
                 fp.write(line)
                 for i in xrange(self.masses_sim.size - 1):
                     line = "%e\t%e\n" % (self.masses_sim[i], 


https://bitbucket.org/yt_analysis/yt/commits/c749355e97df/
Changeset:   c749355e97df
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 19:52:33
Summary:     Updating halo mass function answer test.
Affected #:  2 files

diff -r bf6dff791aba8ce77f8f2ffafd2dfc3bb3837b8f -r c749355e97df3914c9f263dbc203c3ebab268501 yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -19,7 +19,7 @@
     small_patch_amr, \
     big_patch_amr, \
     data_dir_load, \
-    hmf_sim_and_analytic
+    SimHaloMassFunctionTest
 from yt.frontends.enzo.api import EnzoDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
@@ -70,17 +70,12 @@
         test_galaxy0030.__name__ = test.description
         yield test
 
-hds0 = "rockstar_halos/halos_0.0.bin"
-hds1 = "rockstar_halos/halos_0.1.bin"
-@requires_ds(hds0)
-@requires_ds(hds1)
-def test_halo_mass_function():
-    hds = data_dir_load(hds0)
-    yield assert_equal, str(hds), "halos_0.0.bin"
-    for test in hmf_sim_and_analytic(hds):
-    #    print "test.description: ", test.description
-    #    test_halo_mass_function.__name__ = test.description
-       yield test
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+@requires_ds(enzotiny)
+def test_simulated_halo_mass_function():
+    ds = data_dir_load(enzotiny)
+    for finder in ["fof", "hop"]:
+        yield SimHaloMassFunctionTest(ds, finder)
 
 ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_ds(ecp, big_data=True)

diff -r bf6dff791aba8ce77f8f2ffafd2dfc3bb3837b8f -r c749355e97df3914c9f263dbc203c3ebab268501 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -576,35 +576,31 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-class HaloMassFunctionTest(AnswerTestingTest):
-    _type_name = "HaloMassFunction"
-    _attrs = ('halos_ds')
-    def __init__(self, halos_ds):
-        super(HaloMassFunctionTest, self).__init__(halos_ds)
-        self.halos_ds = halos_ds
+class SimHaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "SimHaloMassFunction"
+    _attrs = ("finder")
 
+    def __init__(self, ds_fn, finder):
+        super(HaloMassFunctionTest, self).__init__(ds_fn)
+        self.finder = finder
+    
     def run(self):
-        result = {}
+        from yt.analysis_modules.halo_analysis.api import HaloCatalog
         from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
-        hmf = HaloMassFcn(halos_ds=self.halos_ds)
-        result["masses_sim"] = hmf.masses_sim
-        result["n_cumulative_sim"] = hmf.n_cumulative_sim
-        result["masses_analytic"] = hmf.masses_analytic
-        result["n_cumulative_analytic"] = hmf.n_cumulative_analytic
-        result["dndM_dM_analytic"] = hmf.dndM_dM_analytic
+        hc = HaloCatalog(data_ds=self.ds, finder_method=self.finder)
+        hc.create()
+        
+        hmf = HaloMassFcn(halos_ds=hc.halos_ds)
+        result = np.empty((2, hmf.masses_sim.size))
+        result[0] = hmf.masses_sim.d
+        result[1] = hmf.n_cumulative_sim.d
         return result
 
     def compare(self, new_result, old_result):
-        for newms, oldms in zip(new_result['masses_sim'], old_result['masses_sim']):
-            assert(newms == oldms)
-        for newncs, oldncs in zip(new_result['n_cumulative_sim'], old_result['n_cumulative_sim']):
-            assert(newncs == oldncs)
-        for newma, oldma in zip(new_result['masses_analytic'], old_result['masses_analytic']):
-            assert(newma == oldma)
-        for newnca, oldnca in zip(new_result['n_cumulative_analytic'], old_result['n_cumulative_analytic']):
-            assert(newnca == oldnca)
-        for newdndmdma, olddndmdma in zip(new_result['dndM_dM_analytic'], old_result['dndM_dM_analytic']):
-            assert(newdndmdma == olddndmdma)
+        err_msg = ("Simulated halo mass functions not equation for " +
+                   "%s halo finder.") % self.finder
+        assert_equal(new_result, old_result,
+                     err_msg=err_msg, verbose=True)
 
 def compare_image_lists(new_result, old_result, decimals):
     fns = ['old.png', 'new.png']
@@ -707,7 +703,7 @@
         return comp_imgs
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
-        
+
 
 def requires_ds(ds_fn, big_data = False, file_check = False):
     def ffalse(func):
@@ -751,10 +747,6 @@
                         ds_fn, axis, field, weight_field,
                         dobj_name)
 
-def hmf_sim_and_analytic(halos_ds):
-    if not can_run_ds(halos_ds): return
-    yield HaloMassFunctionTest(halos_ds)
-
 def create_obj(ds, obj_type):
     # obj_type should be tuple of
     #  ( obj_name, ( args ) )
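
Outside the answer-testing harness, the flow the new test exercises looks
roughly like this sketch, using only the names that appear in the diff.

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_analysis.api import HaloCatalog
   from yt.analysis_modules.halo_mass_function.api import HaloMassFcn

   ds = load("enzo_tiny_cosmology/DD0046/DD0046")
   hc = HaloCatalog(data_ds=ds, finder_method="hop")
   hc.create()
   # masses_sim and n_cumulative_sim are the arrays the test compares.
   hmf = HaloMassFcn(halos_ds=hc.halos_ds)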


https://bitbucket.org/yt_analysis/yt/commits/8722fa181897/
Changeset:   8722fa181897
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 20:03:02
Summary:     Adding analytic mass function test.
Affected #:  2 files

diff -r c749355e97df3914c9f263dbc203c3ebab268501 -r 8722fa1818973bbb32a9d34dea8c278cab79853a yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -19,7 +19,8 @@
     small_patch_amr, \
     big_patch_amr, \
     data_dir_load, \
-    SimHaloMassFunctionTest
+    AnalyticHaloMassFunctionTest, \
+    SimulatedHaloMassFunctionTest
 from yt.frontends.enzo.api import EnzoDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
@@ -75,7 +76,13 @@
 def test_simulated_halo_mass_function():
     ds = data_dir_load(enzotiny)
     for finder in ["fof", "hop"]:
-        yield SimHaloMassFunctionTest(ds, finder)
+        yield SimulatedHaloMassFunctionTest(ds, finder)
+
+@requires_ds(enzotiny)
+def test_analytic_halo_mass_function():
+    ds = data_dir_load(enzotiny)
+    for fit in range(1, 5):
+        yield AnalyticHaloMassFunctionTest(ds, fit)
 
 ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_ds(ecp, big_data=True)

diff -r c749355e97df3914c9f263dbc203c3ebab268501 -r 8722fa1818973bbb32a9d34dea8c278cab79853a yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -576,8 +576,8 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
-class SimHaloMassFunctionTest(AnswerTestingTest):
-    _type_name = "SimHaloMassFunction"
+class SimulatedHaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "SimulatedHaloMassFunction"
     _attrs = ("finder")
 
     def __init__(self, ds_fn, finder):
@@ -602,6 +602,29 @@
         assert_equal(new_result, old_result,
                      err_msg=err_msg, verbose=True)
 
+class AnalyticHaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "AnalyticHaloMassFunction"
+    _attrs = ("fitting_function")
+
+    def __init__(self, ds_fn, fitting_function):
+        super(HaloMassFunctionTest, self).__init__(ds_fn)
+        self.fitting_function = fitting_function
+    
+    def run(self):
+        from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
+        hmf = HaloMassFcn(simulation_ds=self.ds,
+                          fitting_function=self.fitting_function)
+        result = np.empty((2, hmf.masses_analytic.size))
+        result[0] = hmf.masses_analytic.d
+        result[1] = hmf.n_cumulative_analytic.d
+        return result
+
+    def compare(self, new_result, old_result):
+        err_msg = ("Analytic halo mass functions not equation for " +
+                   "fitting function %d.") % self.fitting_function
+        assert_equal(new_result, old_result,
+                     err_msg=err_msg, verbose=True)
+
 def compare_image_lists(new_result, old_result, decimals):
     fns = ['old.png', 'new.png']
     num_images = len(old_result)
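
The analytic side can be driven the same way outside the harness; a sketch,
with the fitting function picked from the five fits the module supports.

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import HaloMassFcn

   ds = load("enzo_tiny_cosmology/DD0046/DD0046")
   # fitting_function: 1 = Press-Schechter, 2 = Jenkins, 3 = Sheth-Tormen,
   # 4 = Warren, 5 = Tinker
   hmf = HaloMassFcn(simulation_ds=ds, fitting_function=4)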


https://bitbucket.org/yt_analysis/yt/commits/756c740c11ca/
Changeset:   756c740c11ca
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 20:04:07
Summary:     Fixing docstring.
Affected #:  1 file

diff -r 8722fa1818973bbb32a9d34dea8c278cab79853a -r 756c740c11ca833b0df952f93567ebe1511d465c yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -139,7 +139,7 @@
     the dataset.
 
     >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
-    >>> hmf = HaloMassFcn(ds=ds)
+    >>> hmf = HaloMassFcn(simulation_ds=ds)
     >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
     >>> plt.savefig("mass_function.png")
     


https://bitbucket.org/yt_analysis/yt/commits/6cf9de58eb55/
Changeset:   6cf9de58eb55
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 20:17:35
Summary:     Updating docs.
Affected #:  2 files

diff -r 756c740c11ca833b0df952f93567ebe1511d465c -r 6cf9de58eb551b69bfc7a2667d2669c0be53be70 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,3 +1,4 @@
+.. _halo_catalog
 
 Creating Halo Catalogs
 ======================

diff -r 756c740c11ca833b0df952f93567ebe1511d465c -r 6cf9de58eb551b69bfc7a2667d2669c0be53be70 doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -2,23 +2,20 @@
 
 Halo Mass Function
 ==================
-.. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
-.. versionadded:: 1.6
 
 The Halo Mass Function extension is capable of outputting the halo mass function
 for a collection of halos (input), and/or an analytical fit over a given mass range
 for a set of specified cosmological parameters.
 This extension is based on code generously provided by Brian O'Shea.
 
-
 General Overview
 ----------------
 
 A halo mass function can be created for the halos identified in a cosmological 
 simulation, as well as analytic fits using any arbitrary set of cosmological
 parameters. In order to create a mass function for simulated halos, they must
-first be identified (using HOP, FOF, Parallel HOP, or Rockstar, see 
-:ref:`halo_finding`) and loaded as a halo dataset object. The distribution of
+first be identified (using HOP, FOF, or Rockstar, see 
+:ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
 halo masses will then be found, and can be compared to the analytic prediction
 at the same redshift and using the same cosmological parameters as were used
 in the simulation. Care should be taken in this regard, as the analytic fit
@@ -60,7 +57,7 @@
 The simplest way to create a halo mass function object is to simply pass it no
 arguments and let it use the default cosmological parameters.
 
-..code-block:: python
+.. code-block:: python
 
   from yt.analysis_modules.halo_mass_function.api import *
 
@@ -73,7 +70,7 @@
 can be extracted from the halo dataset, at the same redshift, spanning a similar
 range of halo masses.
 
-..code-block:: python
+.. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
@@ -84,7 +81,7 @@
 A simulation dataset can be passed along with additional cosmological parameters 
 to create an analytic mass function.
 
-..code-block:: python
+.. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
@@ -96,7 +93,7 @@
 The analytic mass function can be created for a set of arbitrary cosmological 
 parameters without any dataset being passed as an argument.
 
-..code-block:: python
+.. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
@@ -105,8 +102,6 @@
                     omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
                     log_mass_min=5, log_mass_max=9, fitting_function=5)
 
-
-
 Keyword Arguments
 -----------------
 
@@ -205,6 +200,7 @@
 After the mass function has been created for both simulated halos and the
 corresponding analytic fits, they can be plotted through something along the 
 lines of
+
 .. code-block:: python
 
   from yt.mods import *
@@ -224,11 +220,11 @@
   hmf.write_out(prefix='hmf', analytic=True, simulated=True)
 
 This writes the file `hmf-analytic.dat' with columns 
-  * **mass** (M_solar)
-  * **(dn/dM)*dM** (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-  * **cumulative number density of halos** (comoving (Mpc/h)^3)
+  * mass [Msun]
+  * cumulative number density of halos [comoving Mpc^-3]
+  * (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]
 
 and the file `hmf-simulated.dat' with columns
-  * **mass** (M_solar)
-  * **log10 of mass** (M_solar)
-  * **cumulative number density of halos** (comoving (Mpc/h)^3)
\ No newline at end of file
+
+  * mass [Msun]
+  * cumulative number density of halos [comoving Mpc^-3]
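
A minimal plotting sketch built from the arrays documented in this changeset;
it assumes the analytic fit is created alongside the simulated one, which is
the default when a halo dataset is supplied.

.. code-block:: python

   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
   import matplotlib.pyplot as plt

   my_halos = load("rockstar_halos/halos_0.0.bin")
   hmf = HaloMassFcn(halos_ds=my_halos)
   plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim, label="simulated")
   plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic, label="analytic")
   plt.legend()
   plt.savefig("mass_function.png")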


https://bitbucket.org/yt_analysis/yt/commits/58a2035c7598/
Changeset:   58a2035c7598
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-28 20:18:02
Summary:     Removing old doc file.
Affected #:  2 files

diff -r 6cf9de58eb551b69bfc7a2667d2669c0be53be70 -r 58a2035c759824753a87b0668731978ee1df8720 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,4 +1,4 @@
-.. _halo_catalog
+.. _halo_catalog:
 
 Creating Halo Catalogs
 ======================

diff -r 6cf9de58eb551b69bfc7a2667d2669c0be53be70 -r 58a2035c759824753a87b0668731978ee1df8720 doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ /dev/null
@@ -1,143 +0,0 @@
-.. _hmf_howto:
-
-Halo Mass Function: Start to Finish
-===================================
-
-This how-to steps through the three simple steps it takes to find the halo
-mass function for an ``enzo`` dataset. The first step is to find the haloes,
-second to find the virial mass (gas + dark matter) contained in each halo using
-the halo profiler, and
-finally build a halo mass function of the haloes and an analytical fit
-for comparison.
-
-Halo Finding
-------------
-
-The first step is find the haloes in the simulation. There are a number of ways
-to do this explained in detail in :ref:`halo_finding`.
-You are encouraged to read about the differences between the halo finders and
-experiment with different settings and functions.
-For the purposes of
-the halo mass function, Friends of Friends (FOF) is probably not the best choice.
-Therefore, below
-is a simple example of how to run HOP on a dataset. This example will also
-write out a text file with the halo particulars, which will be used in the next
-step.
-
-.. code-block:: python
-
-  from yt.mods import *
-  ds = load("data0001")
-  halo_list = HaloFinder(ds)
-  halo_list.write_out("HopAnalysis.out")
-
-The only important columns of data in the text file ``HopAnalysis.out``
-are the halo number, center of
-mass, and maximum radius. These are the only values that the next step requires.
-
-Halo Profiling
---------------
-
-The halo profiler is a powerful tool that can analyze
-haloes in many ways. It is beneficial to read its documentation to become
-familiar with it before using it.
-For this exercise, only the virial mass of each
-halo is important. This script below will take the output from the previous step
-and find the virial mass for each halo, and save it to a text file.
-
-.. code-block:: python
-
-  import yt.analysis_modules.halo_profiler.api as HP
-  hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
-  hp.add_halo_filter(HP.VirialFilter,must_be_virialized=True,
-                overdensity_field='ActualOverdensity',
-                virial_overdensity=200,
-                virial_filters=[['TotalMassMsun','>=','1e8']],
-                virial_quantities=['TotalMassMsun','RadiusMpc'])
-  hp.make_profiles(filename="VirialHaloes.out")
-
-This script limits the output to virialized haloes with mass greater than or
-equal to 1e8 solar masses. If you run into problems, try pre-filtering problem
-haloes.
-
-Halo Mass Function
-------------------
-
-The halo mass function extension (see :ref:`halo_mass_function`) reads in the
-contents of ``VirialHaloes.out`` and can output files which can be used
-to make a plot. In this script below, the results from the previous step are
-read in, analyzed, and at the same time an analytical fit is calculated, and
-finally the results are written out to two files. The plot found from the haloes
-is saved to a file named ``hmf-haloes.dat`` and the fit to ``hmf-fit.dat``.
-For these files, the columns that are most likely to be needed for plotting are
-the first (halo mass bin), and the fourth and third (cumulative number density
-of haloes) for the ``-fit.dat`` and ``-haloes.dat`` files. See
-the full halo mass function documentation for more details on the contents of
-the files.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_mass_function.api import *
-  ds = load("data0001")
-  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4, mass_column=5, num_sigma_bins=200)
-  hmf.write_out(prefix='hmf')
-
-Inside the call to ``HaloMassFcn`` there are several hand-set parameters that 
-*must* be correct for the analytical fit to be correct. The three cosmological
-parameters (``sigma8input``, ``primordial_index`` and ``omega_baryon0``) are
-not stored with ``Enzo`` datasets, so they must be found from the initial
-conditions of the simulation. ``mass_column`` is set to 5 because that is the
-zero-indexed ordinal of the mass column in the ``VirialHaloes.out`` file.
-``num_sigma_bins`` controls how many mass bins the haloes are dropped into,
-and ``fitting_function`` controls which analytical function to use.
-
-Putting it All Together
------------------------
-
-It is not necessary to run each step separately from the others. This example
-below will run all steps at once.
-
-.. code-block:: python
-
-  from yt.mods import *
-  import yt.analysis_modules.halo_profiler.api as HP
-  from yt.analysis_modules.halo_mass_function.api import *
-  
-  # If desired, start loop here.
-  ds = load("data0001")
-  
-  halo_list = HaloFinder(ds)
-  halo_list.write_out("HopAnalysis.out")
-  
-  hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
-  hp.add_halo_filter(HP.VirialFilter,must_be_virialized=True,
-                overdensity_field='ActualOverdensity',
-                virial_overdensity=200,
-                virial_filters=[['TotalMassMsun','>=','1e8']],
-                virial_quantities=['TotalMassMsun','RadiusMpc'])
-  hp.make_profiles(filename="VirialHaloes.out")
-  
-  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4, mass_column=5, num_sigma_bins=200)
-  hmf.write_out(prefix='hmf')
-  # End loop here.
-
-The script above will work in parallel which can reduce runtimes substantially.
-If this analysis is to be run on a sequence of datasets, the section that needs
-to be inside the loop is shown bracketed by comments. Be careful how the
-output files are named as to not over-write output from previous loop cycles.
-
-Plotting
---------
-
-When plotting the output, be careful about the units of the output for the
-halo mass function. The figure shown in the documentation (on this page:
-:ref:`halo_mass_function`) has the number density of haloes per (h^-1 Mpc)^3,
-which is different than the output of the halo mass extension (which is
-haloes per (Mpc)^3). To get the same units as the figure for the ``-fit.dat``
-and ``-haloes.dat`` files, divide the fourth and third column by the comoving
-volume cubed of the simulation, respectively when plotting.


https://bitbucket.org/yt_analysis/yt/commits/e3b4e7436722/
Changeset:   e3b4e7436722
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-29 10:17:42
Summary:     Fixing.
Affected #:  1 file

diff -r 58a2035c759824753a87b0668731978ee1df8720 -r e3b4e74367221f8b1393deb1e26dfd34af6f3325 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -581,7 +581,7 @@
     _attrs = ("finder")
 
     def __init__(self, ds_fn, finder):
-        super(HaloMassFunctionTest, self).__init__(ds_fn)
+        super(SimulatedHaloMassFunctionTest, self).__init__(ds_fn)
         self.finder = finder
     
     def run(self):
@@ -607,7 +607,7 @@
     _attrs = ("fitting_function")
 
     def __init__(self, ds_fn, fitting_function):
-        super(HaloMassFunctionTest, self).__init__(ds_fn)
+        super(AnalyticHaloMassFunctionTest, self).__init__(ds_fn)
         self.fitting_function = fitting_function
     
     def run(self):


https://bitbucket.org/yt_analysis/yt/commits/8fe76a2bbd79/
Changeset:   8fe76a2bbd79
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-29 12:44:05
Summary:     Adding comoving units to ds object returned by halo finding.
Affected #:  1 file

diff -r e3b4e74367221f8b1393deb1e26dfd34af6f3325 -r 8fe76a2bbd79b5f7a5af864069a8946e6eb31151 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,6 +21,7 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.units.dimensions import length
 from yt.utilities.operator_registry import \
      OperatorRegistry
 
@@ -136,5 +137,13 @@
         attr_val = getattr(data_ds, attr)
         setattr(particle_ds, attr, attr_val)
     particle_ds.current_time = particle_ds.current_time.in_cgs()
+
+    particle_ds.unit_registry.modify("h", particle_ds.hubble_constant)
+    # Comoving lengths
+    for my_unit in ["m", "pc", "AU", "au"]:
+        new_unit = "%scm" % my_unit
+        particle_ds.unit_registry.add(new_unit, particle_ds.unit_registry.lut[my_unit][0] /
+                                      (1 + particle_ds.current_redshift),
+                                      length, "\\rm{%s}/(1+z)" % my_unit)
     
     return particle_ds
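
With these units registered, the halo dataset handed back by the finder can
convert lengths to comoving values directly; a sketch, assuming
``particle_ds`` is the dataset returned above.

.. code-block:: python

   # "Mpccm" is a comoving megaparsec: the proper length scaled by 1/(1+z).
   sim_volume = particle_ds.domain_width.in_units("Mpccm").prod()
   print sim_volume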


https://bitbucket.org/yt_analysis/yt/commits/476dd7fef900/
Changeset:   476dd7fef900
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-29 12:44:53
Summary:     Being more generous with where particle masses come from.
Affected #:  1 file

diff -r 8fe76a2bbd79b5f7a5af864069a8946e6eb31151 -r 476dd7fef90022896fcc8b19c995338d2c4946d9 yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -253,10 +253,10 @@
         data_source = self.halos_ds.all_data()
         if which_limit is "min_mass":
             self.log_mass_min = \
-              int(np.log10(np.amin(data_source["halos", "particle_mass"].in_units("Msun"))))
+              int(np.log10(np.amin(data_source["particle_mass"].in_units("Msun"))))
         if which_limit is "max_mass":
             self.log_mass_max = \
-              int(np.log10(np.amax(data_source["halos", "particle_mass"].in_units("Msun"))))+1
+              int(np.log10(np.amax(data_source["particle_mass"].in_units("Msun"))))+1
     
     """
     Here's where we create the halo mass functions from simulated halos
@@ -264,7 +264,7 @@
     def create_sim_hmf(self):
         data_source = self.halos_ds.all_data()
         # We're going to use indices to count the number of halos above a given mass
-        masses_sim = np.sort(data_source["halos", "particle_mass"].in_units("Msun"))
+        masses_sim = np.sort(data_source["particle_mass"].in_units("Msun"))
         # Determine the size of the simulation volume in comoving Mpc**3
         sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
         n_cumulative_sim = np.arange(len(masses_sim),0,-1)


https://bitbucket.org/yt_analysis/yt/commits/63bc23c35bcc/
Changeset:   63bc23c35bcc
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-29 12:45:11
Summary:     SimulatedHaloMassFunctionTest should now work.
Affected #:  1 file

diff -r 476dd7fef90022896fcc8b19c995338d2c4946d9 -r 63bc23c35bcc82d18905f32274d2afc9bf93986c yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -81,7 +81,7 @@
 @requires_ds(enzotiny)
 def test_analytic_halo_mass_function():
     ds = data_dir_load(enzotiny)
-    for fit in range(1, 5):
+    for fit in range(1, 6):
         yield AnalyticHaloMassFunctionTest(ds, fit)
 
 ecp = "enzo_cosmology_plus/DD0046/DD0046"


https://bitbucket.org/yt_analysis/yt/commits/6c076d84acf1/
Changeset:   6c076d84acf1
Branch:      yt-3.0
User:        brittonsmith
Date:        2014-07-29 12:46:01
Summary:     Fixing tests?
Affected #:  1 file

diff -r 63bc23c35bcc82d18905f32274d2afc9bf93986c -r 6c076d84acf1ef762c458594bdce2c8793cd269d yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -578,7 +578,7 @@
 
 class SimulatedHaloMassFunctionTest(AnswerTestingTest):
     _type_name = "SimulatedHaloMassFunction"
-    _attrs = ("finder")
+    _attrs = ("finder",)
 
     def __init__(self, ds_fn, finder):
         super(SimulatedHaloMassFunctionTest, self).__init__(ds_fn)
@@ -604,7 +604,7 @@
 
 class AnalyticHaloMassFunctionTest(AnswerTestingTest):
     _type_name = "AnalyticHaloMassFunction"
-    _attrs = ("fitting_function")
+    _attrs = ("fitting_function",)
 
     def __init__(self, ds_fn, fitting_function):
         super(AnalyticHaloMassFunctionTest, self).__init__(ds_fn)


https://bitbucket.org/yt_analysis/yt/commits/b2a39372ea82/
Changeset:   b2a39372ea82
Branch:      yt-3.0
User:        xarthisius
Date:        2014-07-29 22:13:15
Summary:     Merged in brittonsmith/yt/yt-3.0 (pull request #1086)

Updating Halo Mass Function
Affected #:  8 files

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,3 +1,4 @@
+.. _halo_catalog:
 
 Creating Halo Catalogs
 ======================

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c doc/source/analyzing/analysis_modules/halo_mass_function.rst
--- a/doc/source/analyzing/analysis_modules/halo_mass_function.rst
+++ b/doc/source/analyzing/analysis_modules/halo_mass_function.rst
@@ -2,29 +2,34 @@
 
 Halo Mass Function
 ==================
-.. sectionauthor:: Stephen Skory <sskory at physics.ucsd.edu>
-.. versionadded:: 1.6
 
 The Halo Mass Function extension is capable of outputting the halo mass function
-for a collection haloes (input), and/or an analytical fit over a given mass range
+for a collection of halos (input), and/or an analytical fit over a given mass range
 for a set of specified cosmological parameters.
-
 This extension is based on code generously provided by Brian O'Shea.
 
 General Overview
 ----------------
 
-In order to run this extension on a dataset, the haloes need to be located
-(using HOP, FOF or Parallel HOP, see :ref:`halo_finding`),
-and their virial masses determined using the
-HaloProfiler.
-Please see the step-by-step how-to which puts these steps together
-(:ref:`hmf_howto`).
-If an optional analytical fit is desired, the correct initial
-cosmological parameters will need to be input as well. These initial parameters
-are not stored in an Enzo dataset, so they must be set by hand.
-An analytical fit can be found without referencing a particular dataset or
-set of haloes, but all the cosmological parameters need to be set by hand.
+A halo mass function can be created for the halos identified in a cosmological 
+simulation, as well as analytic fits using any arbitrary set of cosmological
+parameters. In order to create a mass function for simulated halos, they must
+first be identified (using HOP, FOF, or Rockstar, see 
+:ref:`halo_catalog`) and loaded as a halo dataset object. The distribution of
+halo masses will then be found, and can be compared to the analytic prediction
+at the same redshift and using the same cosmological parameters as were used
+in the simulation. Care should be taken in this regard, as the analytic fit
+requires the specification of cosmological parameters that are not necessarily 
+stored in the halo or simulation datasets, and must be specified by the user.
+Efforts have been made to set reasonable defaults for these parameters, but 
+setting them to identically match those used in the simulation will produce a
+much better comparison.
+
+Analytic halo mass functions can also be created without a halo dataset by 
+providing either a simulation dataset or specifying cosmological parameters by
+hand. yt includes 5 analytic fits for the halo mass function which can be
+selected.
+
 
 Analytical Fits
 ---------------
@@ -45,115 +50,181 @@
 The Tinker fit is for the :math:`\Delta=300` fits given in the paper, which
 appears to fit HOP threshold=80.0 fairly well.
 
-Analyze Simulated Haloes
-------------------------
 
-If an analytical fit is not needed, it is simple to analyze a set of 
-haloes. The ``halo_file`` needs to be specified, and
-``fitting_function`` does not need to be specified.
-``num_sigma_bins`` is how many bins the halo masses are sorted into.
-The default is 360. ``mass_column`` is the zero-indexed column of the
-``halo_file`` file that contains the halo masses. The default is 5, which
-corresponds to the sixth column of data in the file.
+Basic Halo Mass Function Creation
+---------------------------------
+
+The simplest way to create a halo mass function object is to simply pass it no
+arguments and let it use the default cosmological parameters.
+
+.. code-block:: python
+
+  from yt.analysis_modules.halo_mass_function.api import *
+
+  hmf = HaloMassFcn()
+
+This will create a HaloMassFcn object with arrays attached that hold the
+information about the analytic mass function. Creating the halo mass function for a set
+of simulated halos requires only the loaded halo dataset to be passed as an 
+argument. This also creates the analytic mass function using all parameters that 
+can be extracted from the halo dataset, at the same redshift, spanning a similar
+range of halo masses.
 
 .. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  ds = load("data0030")
-  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", num_sigma_bins=200,
-  mass_column=5)
 
-Attached to ``hmf`` is the convenience function ``write_out``, which saves
-the halo mass function to a text file. By default, both the halo analysis (``haloes``) and
-fit (``fit``) are written to (different) text files, but they can be turned on or off
-explicitly. ``prefix`` sets the name used for the file(s). The haloes file
-is named ``prefix-haloes.dat``, and the fit file ``prefix-fit.dat``.
-Continued from above, invoking this command:
+  my_halos = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=my_halos)
 
-.. code-block:: python
-
-  hmf.write_out(prefix='hmf', fit=False, haloes=True)
-
-will save the haloes data to a file named ``hmf-haloes.dat``. The contents
-of the ``-haloes.dat`` file is three columns:
-
-  1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
-  2. mass (Msolar/h) for this bin.
-  3. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
-
-Analytical Halo Mass Function Fit
----------------------------------
-
-When an analytical fit is desired, in nearly all cases several cosmological
-parameters will need to be specified by hand. These parameters are not
-stored with Enzo datasets. In the case where both the haloes and an analytical
-fit are desired, the analysis is instantiated as below.
-``sigma8input``, ``primordial_index`` and ``omega_baryon0`` should be set to
-the same values as
-``PowerSpectrumSigma8``, ``PowerSpectrumPrimordialIndex`` and
-``CosmologyOmegaBaryonNow`` from the
-`inits <http://lca.ucsd.edu/projects/enzo/wiki/UserGuide/RunningInits>`_
-parameter file used to set up the simulation.
-``fitting_function`` is set to values 1 through 4 from the list of available
-fits above.
+A simulation dataset can be passed along with additional cosmological parameters 
+to create an analytic mass function.
 
 .. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  ds = load("data0030")
-  hmf = HaloMassFcn(ds, halo_file="FilteredQuantities.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4)
-  hmf.write_out(prefix='hmf')
 
-Both the ``-haloes.dat`` and ``-fit.dat`` files are written to disk.
-The contents of the ``-fit.dat`` file is four columns:
+  my_ds = load("RD0027/RedshiftOutput0027")
+  hmf = HaloMassFcn(simulation_ds=my_ds, omega_baryon0=0.05, primordial_index=0.96, 
+                    sigma8=0.8, log_mass_min=5, log_mass_max=9)
 
-  1. log10 of mass (Msolar, NOT Msolar/h) for this bin.
-  2. mass (Msolar/h) for this bin.
-  3. (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3) in this bin.
-  4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3) in this bin.
-
-Below is an example of the output for both the haloes and the (Warren)
-analytical fit, for three datasets. The black lines are the calculated
-halo mass functions, and the blue lines the analytical fit set by initial
-conditions. This simulation shows typical behavior, in that there are too
-few small haloes compared to the fit due to lack of mass and gravity resolution
-for small haloes. But at higher mass ranges, the simulated haloes are quite close
-to the analytical fit.
-
-.. image:: _images/halo_mass_function.png
-   :width: 350
-   :height: 400
-
-The analytical fit can be found without referencing a particular dataset. In this
-case, all the various cosmological parameters need to be specified by hand.
-``omega_matter0`` is the fraction of universe that is made up of matter
-(baryons and dark matter). ``omega_lambda0`` is the fractional proportion due
-to dark energy. In a flat universe, ``omega_matter0`` + ``omega_lambda0`` = 1.
-``this_redshift`` is the redshift for which you wish to generate a fit.
-``log_mass_min`` and ``log_mass_max`` are the logarithmic ends of the mass range for which
-you wish to calculate the fit.
+The analytic mass function can be created for a set of arbitrary cosmological 
+parameters without any dataset being passed as an argument.
 
 .. code-block:: python
 
   from yt.mods import *
   from yt.analysis_modules.halo_mass_function.api import *
-  hmf = HaloMassFcn(None, omega_matter0=0.3, omega_lambda0=0.7,
-  omega_baryon0=0.06, hubble0=.7, this_redshift=0., log_mass_min=8.,
-  log_mass_max=13., sigma8input=0.9, primordial_index=1.,
-  fitting_function=1)
-  hmf.write_out(prefix="hmf-press-schechter", fit=True, haloes=False)
 
-It is possible to access the output of the halo mass function without saving
-to disk. The content is stored in arrays hanging off the ``HaloMassFcn``
-object:
+  hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27, 
+                    omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
+                    log_mass_min=5, log_mass_max=9, fitting_function=5)
 
-  * ``hmf.logmassarray`` for log10 of mass bin.
-  * ``hmf.massarray`` for mass bin.
-  * ``hmf.dn_M_z`` for (dn/dM)*dM (analytical fit).
-  * ``hmf.nofmz_cum`` for cumulative number density of halos (analytical fit).
-  * ``hmf.dis`` for cumulative number density of halos (from provided halo
-    halo information).
+Keyword Arguments
+-----------------
+
+  * **simulation_ds** (*Simulation dataset object*)
+    The loaded simulation dataset, used to set cosmological parameters.
+    Default : None.
+
+  * **halos_ds** (*Halo dataset object*)
+    The halos from a simulation, used for creation of the simulated 
+    halo mass function.
+    Default : None.
+
+  * **make_analytic** (*bool*)
+    Whether or not to calculate the analytic mass function to go with 
+    the simulated halo mass function.  Automatically set to true if a 
+    simulation dataset is provided.
+    Default : True.
+
+  * **omega_matter0** (*float*)
+    The fraction of the universe made up of matter (dark and baryonic). 
+    Default : 0.2726.
+
+  * **omega_lambda0** (*float*)
+    The fraction of the universe made up of dark energy. 
+    Default : 0.7274.
+
+  * **omega_baryon0**  (*float*)
+    The fraction of the universe made up of baryonic matter. This is not 
+    always stored in the dataset and should be checked by hand.
+    Default : 0.0456.
+
+  * **hubble0** (*float*)
+    The expansion rate of the universe in units of 100 km/s/Mpc. 
+    Default : 0.704.
+
+  * **sigma8** (*float*)
+    The amplitude of the linear power spectrum at z=0 as specified by 
+    the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
+    8 Mpc/h. This is not always stored in the dataset and should be 
+    checked by hand.
+    Default : 0.86.
+
+  * **primordial_index** (*float*)
+    This is the index of the mass power spectrum before modification by 
+    the transfer function. A value of 1 corresponds to the scale-free 
+    primordial spectrum. This is not always stored in the dataset and 
+    should be checked by hand.
+    Default : 1.0.
+
+  * **this_redshift** (*float*)
+    The current redshift. 
+    Default : 0.
+
+  * **log_mass_min** (*float*)
+    The log10 of the mass of the minimum of the halo mass range. This is
+    set automatically by the range of halo masses if a simulated halo 
+    dataset is provided. If a halo dataset is not provided and no value
+    is specified, it will be set to 5. Units: M_solar
+    Default : None.
+
+  * **log_mass_max** (*float*)
+    The log10 of the mass of the maximum of the halo mass range. This is
+    set automatically by the range of halo masses if a simulated halo 
+    dataset is provided. If a halo dataset is not provided and no value
+    is specified, it will be set to 16. Units: M_solar
+    Default : None.
+
+  * **num_sigma_bins** (*int*)
+    The number of bins (points) to use for the calculation of the 
+    analytic mass function. 
+    Default : 360.
+
+  * **fitting_function** (*int*)
+    Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+    3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
+    Default : 4.
+
+
+Outputs
+-------
+
+A ``HaloMassFcn`` object has several arrays hanging off of it containing 
+the mass function data:
+
+  * **masses_sim**: Halo masses from simulated halos. Units: M_solar
+
+  * **n_cumulative_sim**: Number density of halos with mass greater than the 
+    corresponding mass in masses_sim. Units: comoving Mpc^-3
+
+  * **masses_analytic**: Masses used for the generation of the analytic mass 
+    function. Units: M_solar
+
+  * **n_cumulative_analytic**: Number density of halos with mass greater than 
+    the corresponding mass in masses_analytic. Units: comoving Mpc^-3
+
+  * **dndM_dM_analytic**: Differential number density of halos, (dn/dM)*dM. 
+    Units: comoving Mpc^-3
+
+After the mass function has been created for both simulated halos and the
+corresponding analytic fits, they can be plotted through something along the 
+lines of:
+
+.. code-block:: python
+
+  from yt.mods import *
+  from yt.analysis_modules.halo_mass_function.api import *
+  import matplotlib.pyplot as plt
+
+  my_halos = load("rockstar_halos/halos_0.0.bin")
+  hmf = HaloMassFcn(halos_ds=my_halos)
+
+  plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+  plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
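+  # optionally save the figure (filename is illustrative)
+  plt.savefig("halo_mass_function.png")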
+
+Attached to ``hmf`` is the convenience function ``write_out``, which saves the 
+halo mass function to a text file. Continuing from the example above:
+
+.. code-block:: python
+
+  hmf.write_out(prefix='hmf', analytic=True, simulated=True)
+
+This writes the file ``hmf-analytic.dat`` with columns
+
+  * mass [Msun]
+  * cumulative number density of halos [comoving Mpc^-3]
+  * (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]
+
+and the file ``hmf-simulated.dat`` with columns
+
+  * mass [Msun]
+  * cumulative number density of halos [comoving Mpc^-3]
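+
+These text files can be read back later for plotting; a minimal sketch with 
+numpy, assuming the column layout documented above:
+
+.. code-block:: python
+
+  import numpy as np
+  import matplotlib.pyplot as plt
+
+  # hmf-simulated.dat: mass [Msun], cumulative number density [comoving Mpc^-3]
+  mass, n_cum = np.loadtxt("hmf-simulated.dat", unpack=True)
+  plt.loglog(mass, n_cum)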

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c doc/source/analyzing/analysis_modules/hmf_howto.rst
--- a/doc/source/analyzing/analysis_modules/hmf_howto.rst
+++ /dev/null
@@ -1,143 +0,0 @@
-.. _hmf_howto:
-
-Halo Mass Function: Start to Finish
-===================================
-
-This how-to steps through the three simple steps it takes to find the halo
-mass function for an ``enzo`` dataset. The first step is to find the haloes,
-second to find the virial mass (gas + dark matter) contained in each halo using
-the halo profiler, and
-finally build a halo mass function of the haloes and an analytical fit
-for comparison.
-
-Halo Finding
-------------
-
-The first step is find the haloes in the simulation. There are a number of ways
-to do this explained in detail in :ref:`halo_finding`.
-You are encouraged to read about the differences between the halo finders and
-experiment with different settings and functions.
-For the purposes of
-the halo mass function, Friends of Friends (FOF) is probably not the best choice.
-Therefore, below
-is a simple example of how to run HOP on a dataset. This example will also
-write out a text file with the halo particulars, which will be used in the next
-step.
-
-.. code-block:: python
-
-  from yt.mods import *
-  ds = load("data0001")
-  halo_list = HaloFinder(ds)
-  halo_list.write_out("HopAnalysis.out")
-
-The only important columns of data in the text file ``HopAnalysis.out``
-are the halo number, center of
-mass, and maximum radius. These are the only values that the next step requires.
-
-Halo Profiling
---------------
-
-The halo profiler is a powerful tool that can analyze
-haloes in many ways. It is beneficial to read its documentation to become
-familiar with it before using it.
-For this exercise, only the virial mass of each
-halo is important. This script below will take the output from the previous step
-and find the virial mass for each halo, and save it to a text file.
-
-.. code-block:: python
-
-  import yt.analysis_modules.halo_profiler.api as HP
-  hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
-  hp.add_halo_filter(HP.VirialFilter,must_be_virialized=True,
-                overdensity_field='ActualOverdensity',
-                virial_overdensity=200,
-                virial_filters=[['TotalMassMsun','>=','1e8']],
-                virial_quantities=['TotalMassMsun','RadiusMpc'])
-  hp.make_profiles(filename="VirialHaloes.out")
-
-This script limits the output to virialized haloes with mass greater than or
-equal to 1e8 solar masses. If you run into problems, try pre-filtering problem
-haloes.
-
-Halo Mass Function
-------------------
-
-The halo mass function extension (see :ref:`halo_mass_function`) reads in the
-contents of ``VirialHaloes.out`` and can output files which can be used
-to make a plot. In this script below, the results from the previous step are
-read in, analyzed, and at the same time an analytical fit is calculated, and
-finally the results are written out to two files. The plot found from the haloes
-is saved to a file named ``hmf-haloes.dat`` and the fit to ``hmf-fit.dat``.
-For these files, the columns that are most likely to be needed for plotting are
-the first (halo mass bin), and the fourth and third (cumulative number density
-of haloes) for the ``-fit.dat`` and ``-haloes.dat`` files. See
-the full halo mass function documentation for more details on the contents of
-the files.
-
-.. code-block:: python
-
-  from yt.mods import *
-  from yt.analysis_modules.halo_mass_function.api import *
-  ds = load("data0001")
-  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4, mass_column=5, num_sigma_bins=200)
-  hmf.write_out(prefix='hmf')
-
-Inside the call to ``HaloMassFcn`` there are several hand-set parameters that 
-*must* be correct for the analytical fit to be correct. The three cosmological
-parameters (``sigma8input``, ``primordial_index`` and ``omega_baryon0``) are
-not stored with ``Enzo`` datasets, so they must be found from the initial
-conditions of the simulation. ``mass_column`` is set to 5 because that is the
-zero-indexed ordinal of the mass column in the ``VirialHaloes.out`` file.
-``num_sigma_bins`` controls how many mass bins the haloes are dropped into,
-and ``fitting_function`` controls which analytical function to use.
-
-Putting it All Together
------------------------
-
-It is not necessary to run each step separately from the others. This example
-below will run all steps at once.
-
-.. code-block:: python
-
-  from yt.mods import *
-  import yt.analysis_modules.halo_profiler.api as HP
-  from yt.analysis_modules.halo_mass_function.api import *
-  
-  # If desired, start loop here.
-  ds = load("data0001")
-  
-  halo_list = HaloFinder(ds)
-  halo_list.write_out("HopAnalysis.out")
-  
-  hp = HP.HaloProfiler("data0001", halo_list_file='HopAnalysis.out')
-  hp.add_halo_filter(HP.VirialFilter,must_be_virialized=True,
-                overdensity_field='ActualOverdensity',
-                virial_overdensity=200,
-                virial_filters=[['TotalMassMsun','>=','1e8']],
-                virial_quantities=['TotalMassMsun','RadiusMpc'])
-  hp.make_profiles(filename="VirialHaloes.out")
-  
-  hmf = HaloMassFcn(ds, halo_file="VirialHaloes.out", 
-  sigma8input=0.9, primordial_index=1., omega_baryon0=0.06,
-  fitting_function=4, mass_column=5, num_sigma_bins=200)
-  hmf.write_out(prefix='hmf')
-  # End loop here.
-
-The script above will work in parallel which can reduce runtimes substantially.
-If this analysis is to be run on a sequence of datasets, the section that needs
-to be inside the loop is shown bracketed by comments. Be careful how the
-output files are named as to not over-write output from previous loop cycles.
-
-Plotting
---------
-
-When plotting the output, be careful about the units of the output for the
-halo mass function. The figure shown in the documentation (on this page:
-:ref:`halo_mass_function`) has the number density of haloes per (h^-1 Mpc)^3,
-which is different than the output of the halo mass extension (which is
-haloes per (Mpc)^3). To get the same units as the figure for the ``-fit.dat``
-and ``-haloes.dat`` files, divide the fourth and third column by the comoving
-volume cubed of the simulation, respectively when plotting.

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- a/yt/analysis_modules/halo_analysis/halo_finding_methods.py
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -21,6 +21,7 @@
     HaloCatalogDataset
 from yt.frontends.stream.data_structures import \
     load_particles
+from yt.units.dimensions import length
 from yt.utilities.operator_registry import \
      OperatorRegistry
 
@@ -136,5 +137,13 @@
         attr_val = getattr(data_ds, attr)
         setattr(particle_ds, attr, attr_val)
     particle_ds.current_time = particle_ds.current_time.in_cgs()
+
+    particle_ds.unit_registry.modify("h", particle_ds.hubble_constant)
+    # Comoving lengths
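+    # Each "Xcm" unit below is the comoving analogue of unit X: its proper
+    # (physical) size is X / (1 + z), matching the LUT scaling applied here.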
+    for my_unit in ["m", "pc", "AU", "au"]:
+        new_unit = "%scm" % my_unit
+        particle_ds.unit_registry.add(new_unit, particle_ds.unit_registry.lut[my_unit][0] /
+                                      (1 + particle_ds.current_redshift),
+                                      length, "\\rm{%s}/(1+z)" % my_unit)
     
     return particle_ds

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -1,5 +1,5 @@
 """
-halo_mass_function - Halo Mass Function and supporting functions.
+Halo Mass Function and supporting functions.
 
 
 
@@ -16,198 +16,314 @@
 import numpy as np
 import math, time
 
-from yt.funcs import *
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelDummy, \
-    ParallelAnalysisInterface, \
-    parallel_blocking_call
-from yt.utilities.physical_constants import \
-    cm_per_mpc, \
-    mass_sun_cgs
+from yt.funcs import mylog
+from yt.units.yt_array import \
+    YTArray, \
+    YTQuantity
 from yt.utilities.physical_ratios import \
     rho_crit_g_cm3_h2
 
-class HaloMassFcn(ParallelAnalysisInterface):
+class HaloMassFcn():
+    r"""
+    Initialize a HaloMassFcn object to analyze the distribution of halos as 
+    a function of mass.  A mass function can be created for a set of 
+    simulated halos, an analytic fit can be created for a redshift and 
+    set of cosmological parameters, or both can be created.
+
+    Provided with a halo dataset object, this will make the mass function 
+    for simulated halos.  Providing a simulation dataset will set as many 
+    of the cosmological parameters as possible for the creation of the 
+    analytic mass function.
+
+    The HaloMassFcn object has arrays hanging off of it containing the mass
+    function information.
+
+    masses_sim : Array 
+        Halo masses from simulated halos. Units: M_solar.
+    n_cumulative_sim : Array
+        Number density of halos with mass greater than the corresponding 
+        mass in masses_sim (simulated). Units: comoving Mpc^-3
+    masses_analytic : Array
+        Masses used for the generation of the analytic mass function, Units:
+        M_solar.
+    n_cumulative_analytic : Array
+        Number density of halos with mass greater than the corresponding
+        mass in masses_analytic (analytic). Units: comoving Mpc^-3
+    dndM_dM_analytic : Array
+        Differential number density of halos, (dn/dM)*dM (analytic).
+
+    The HaloMassFcn object also has a convenience function write_out() that
+    will write out the data to disk.
+
+    Creating a HaloMassFcn object with no arguments will produce an analytic
+    mass function at redshift = 0 using default cosmological values.
+
+    Parameters
+    ----------
+    simulation_ds : Simulation dataset object
+        The loaded simulation dataset, used to set cosmological parameters.
+        Default : None.
+    halos_ds : Halo dataset object
+        The halos from a simulation, used for creation of the simulated 
+        halo mass function.
+        Default : None.
+    make_analytic : bool 
+        Whether or not to calculate the analytic mass function to go with 
+        the simulated halo mass function.  Automatically set to true if a 
+        simulation dataset is provided.
+        Default : True.
+    omega_matter0 : float
+        The fraction of the universe made up of matter (dark and baryonic). 
+        Default : 0.2726.
+    omega_lambda0 : float
+        The fraction of the universe made up of dark energy. 
+        Default : 0.7274.
+    omega_baryon0  : float 
+        The fraction of the universe made up of baryonic matter. This is not 
+        always stored in the dataset and should be checked by hand.
+        Default : 0.0456.
+    hubble0 : float 
+        The expansion rate of the universe in units of 100 km/s/Mpc. 
+        Default : 0.704.
+    sigma8 : float 
+        The amplitude of the linear power spectrum at z=0 as specified by 
+        the rms amplitude of mass-fluctuations in a top-hat sphere of radius 
+        8 Mpc/h. This is not always stored in the dataset and should be 
+        checked by hand.
+        Default : 0.86.
+    primordial_index : float 
+        This is the index of the mass power spectrum before modification by 
+        the transfer function. A value of 1 corresponds to the scale-free 
+        primordial spectrum. This is not always stored in the dataset and 
+        should be checked by hand.
+        Default : 1.0.
+    this_redshift : float 
+        The current redshift. 
+        Default : 0.
+    log_mass_min : float 
+        The log10 of the mass of the minimum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 5. Units: M_solar
+        Default : None.
+    log_mass_max : float 
+        The log10 of the mass of the maximum of the halo mass range. This is
+        set automatically by the range of halo masses if a simulated halo 
+        dataset is provided. If a halo dataset is not provided and no value
+        is specified, it will be set to 16. Units: M_solar
+        Default : None.
+    num_sigma_bins : int
+        The number of bins (points) to use for the calculation of the 
+        analytic mass function. 
+        Default : 360.
+    fitting_function : int
+        Which fitting function to use. 1 = Press-Schechter, 2 = Jenkins, 
+        3 = Sheth-Tormen, 4 = Warren, 5 = Tinker
+        Default : 4.
+
+    Examples
+    --------
+
+    This creates the halo mass function for a halo dataset from a simulation
+    and the analytic mass function at the same redshift as the dataset,
+    using as many cosmological parameters as can be pulled from the dataset.
+
+    >>> halos_ds = load("rockstar_halos/halos_0.0.bin")
+    >>> hmf = HaloMassFcn(halos_ds=halos_ds)
+    >>> plt.loglog(hmf.masses_sim, hmf.n_cumulative_sim)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
+    >>> plt.savefig("mass_function.png")
+
+    This creates only the analytic halo mass function for a simulation
+    dataset, with default values for cosmological parameters not stored in 
+    the dataset.
+
+    >>> ds = load("enzo_tiny_cosmology/DD0046/DD0046")
+    >>> hmf = HaloMassFcn(simulation_ds=ds)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
+    >>> plt.savefig("mass_function.png")
+    
+    This creates the analytic mass function for an arbitrary set of 
+    cosmological parameters, with neither a simulation nor halo dataset.
+
+    >>> hmf = HaloMassFcn(omega_baryon0=0.05, omega_matter0=0.27,
+    ...                   omega_lambda0=0.73, hubble0=0.7, this_redshift=10,
+    ...                   log_mass_min=5, log_mass_max=9)
+    >>> plt.loglog(hmf.masses_analytic, hmf.n_cumulative_analytic)
+    >>> plt.savefig("mass_function.png")
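+
+    Either mass function can also be written to disk with the write_out 
+    convenience function (a minimal sketch; here only the analytic fit 
+    exists, so the simulated output is skipped):
+
+    >>> hmf.write_out(prefix="hmf", analytic=True, simulated=False)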
     """
-    Initalize a HaloMassFcn object to analyze the distribution of haloes
-    as a function of mass.
-    :param halo_file (str): The filename of the output of the Halo Profiler.
-    Default=None.
-    :param omega_matter0 (float): The fraction of the universe made up of
-    matter (dark and baryonic). Default=None.
-    :param omega_lambda0 (float): The fraction of the universe made up of
-    dark energy. Default=None.
-    :param omega_baryon0 (float): The fraction of the universe made up of
-    ordinary baryonic matter. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.05.
-    :param hubble0 (float): The expansion rate of the universe in units of
-    100 km/s/Mpc. Default=None.
-    :param sigma8input (float): The amplitude of the linear power
-    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-    in a top-hat sphere of radius 8 Mpc/h. This should match the value
-    used to create the initial conditions, using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=0.86.
-    :param primoridal_index (float): This is the index of the mass power
-    spectrum before modification by the transfer function. A value of 1
-    corresponds to the scale-free primordial spectrum. This should match
-    the value used to make the initial conditions using 'inits'. This is 
-    *not* stored in the enzo datset so it must be checked by hand.
-    Default=1.0.
-    :param this_redshift (float): The current redshift. Default=None.
-    :param log_mass_min (float): The log10 of the mass of the minimum of the
-    halo mass range. Default=None.
-    :param log_mass_max (float): The log10 of the mass of the maximum of the
-    halo mass range. Default=None.
-    :param num_sigma_bins (float): The number of bins (points) to use for
-    the calculations and generated fit. Default=360.
-    :param fitting_function (int): Which fitting function to use.
-    1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-    5 = Tinker
-    Default=4.
-    :param mass_column (int): The column of halo_file that contains the
-    masses of the haloes. Default=4.
-    """
-    def __init__(self, ds, halo_file=None, omega_matter0=None, omega_lambda0=None,
-    omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
-    this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
-    fitting_function=4, mass_column=5):
-        ParallelAnalysisInterface.__init__(self)
-        self.ds = ds
-        self.halo_file = halo_file
+    def __init__(self, simulation_ds=None, halos_ds=None, make_analytic=True, 
+    omega_matter0=0.2726, omega_lambda0=0.7274, omega_baryon0=0.0456, hubble0=0.704, 
+    sigma8=0.86, primordial_index=1.0, this_redshift=0, log_mass_min=None, 
+    log_mass_max=None, num_sigma_bins=360, fitting_function=4):
+        self.simulation_ds = simulation_ds
+        self.halos_ds = halos_ds
         self.omega_matter0 = omega_matter0
         self.omega_lambda0 = omega_lambda0
         self.omega_baryon0 = omega_baryon0
         self.hubble0 = hubble0
-        self.sigma8input = sigma8input
+        self.sigma8 = sigma8
         self.primordial_index = primordial_index
         self.this_redshift = this_redshift
         self.log_mass_min = log_mass_min
         self.log_mass_max = log_mass_max
         self.num_sigma_bins = num_sigma_bins
         self.fitting_function = fitting_function
-        self.mass_column = mass_column
-        
-        # Determine the run mode.
-        if halo_file is None:
-            # We are hand-picking our various cosmological parameters
-            self.mode = 'single'
-        else:
-            # Make the fit using the same cosmological parameters as the dataset.
-            self.mode = 'haloes'
-            self.omega_matter0 = self.ds.omega_matter
-            self.omega_lambda0 = self.ds.omega_lambda
-            self.hubble0 = self.ds.hubble_constant
-            self.this_redshift = self.ds.current_redshift
-            self.read_haloes()
-            if self.log_mass_min == None:
-                self.log_mass_min = math.log10(min(self.haloes))
-            if self.log_mass_max == None:
-                self.log_mass_max = math.log10(max(self.haloes))
+        self.make_analytic = make_analytic
+        self.make_simulated = False
+        """
+        If we want to make an analytic mass function, grab what we can from either 
+        the halo dataset or the simulation dataset, and make sure that the user 
+        supplied everything else that is needed.
+        """
+        # If we don't have any datasets, make the analytic function with user values
+        if simulation_ds is None and halos_ds is None:
+            # Set a reasonable mass min and max if none were provided
+            if log_mass_min is None:
+                self.log_mass_min = 5
+            if log_mass_max is None:
+                self.log_mass_max = 16
+        # If we're making the analytic function...
+        if self.make_analytic:
+            # Try to set cosmological parameters from the simulation dataset
+            if simulation_ds is not None:
+                self.omega_matter0 = self.simulation_ds.omega_matter
+                self.omega_lambda0 = self.simulation_ds.omega_lambda
+                self.hubble0 = self.simulation_ds.hubble_constant
+                self.this_redshift = self.simulation_ds.current_redshift
+                # Set a reasonable mass min and max if none were provided
+                if log_mass_min is None:
+                    self.log_mass_min = 5
+                if log_mass_max is None:
+                    self.log_mass_max = 16
+            # If we have a halo dataset but not a simulation dataset, use that instead
+            if simulation_ds is None and halos_ds is not None:
+                self.omega_matter0 = self.halos_ds.omega_matter
+                self.omega_lambda0 = self.halos_ds.omega_lambda
+                self.hubble0 = self.halos_ds.hubble_constant
+                self.this_redshift = self.halos_ds.current_redshift
+                # If the user didn't specify mass min and max, set them from the halos
+                if log_mass_min is None:
+                    self.set_mass_from_halos("min_mass")
+                if log_mass_max is None:
+                    self.set_mass_from_halos("max_mass")
+            # Do the calculations.
+            self.sigmaM()
+            self.dndm()
+            # Return the mass array in M_solar rather than M_solar/h
+            self.masses_analytic = YTArray(self.masses_analytic/self.hubble0, "Msun")
+            # The halo arrays will already have yt units, but the analytic forms do 
+            # not. If a dataset has been provided, use that to give them units. At the
+            # same time, convert to comoving (Mpc)^-3
+            if simulation_ds is not None:
+                self.n_cumulative_analytic = simulation_ds.arr(self.n_cumulative_analytic, 
+                                                          "(Mpccm)**(-3)")
+            elif halos_ds is not None:
+                self.n_cumulative_analytic = halos_ds.arr(self.n_cumulative_analytic, 
+                                                          "(Mpccm)**(-3)")
+            else:
+                from yt.units.unit_registry import UnitRegistry
+                from yt.units.dimensions import length
+                hmf_registry = UnitRegistry()
+                for my_unit in ["m", "pc", "AU", "au"]:
+                    new_unit = "%scm" % my_unit
+                    hmf_registry.add(new_unit, 
+                                     hmf_registry.lut[my_unit][0] / 
+                                     (1 + self.this_redshift),
+                                     length, "\\rm{%s}/(1+z)" % my_unit)                         
+                self.n_cumulative_analytic = YTArray(self.n_cumulative_analytic, 
+                                                     "(Mpccm)**(-3)", 
+                                                     registry=hmf_registry) 
 
-        # Input error check.
-        if self.mode == 'single':
-            if omega_matter0 == None or omega_lambda0 == None or \
-            hubble0 == None or this_redshift == None or log_mass_min == None or\
-            log_mass_max == None:
-                mylog.error("All of these parameters need to be set:")
-                mylog.error("[omega_matter0, omega_lambda0, \
-                hubble0, this_redshift, log_mass_min, log_mass_max]")
-                mylog.error("[%s,%s,%s,%s,%s,%s]" % (omega_matter0,\
-                omega_lambda0, hubble0, this_redshift,\
-                log_mass_min, log_mass_max))
-                return None
-        
-        # Poke the user to make sure they're doing it right.
-        mylog.info(
+
         """
-        Please make sure these are the correct values! They are
-        not stored in enzo datasets, so must be entered by hand.
-        sigma8input=%f primordial_index=%f omega_baryon0=%f
-        """ % (self.sigma8input, self.primordial_index, self.omega_baryon0))
-        
-        # Do the calculations.
-        self.sigmaM()
-        self.dndm()
-        
-        if self.mode == 'haloes':
-            self.bin_haloes()
+        If a halo dataset has been supplied, make a mass function for the simulated halos.
+        """
+        if halos_ds is not None:
+            # Used to check if a simulated halo mass function exists to write out
+            self.make_simulated = True
+            # Calculate the simulated halo mass function
+            self.create_sim_hmf()
 
-    def write_out(self, prefix='HMF', fit=True, haloes=True):
+    """
+    If we're making an analytic fit and have a halo dataset, but don't have log_mass_min 
+    or log_mass_max from the user, set it from the range of halo masses.
+    """
+    def set_mass_from_halos(self, which_limit):
+        data_source = self.halos_ds.all_data()
+        if which_limit is "min_mass":
+            self.log_mass_min = \
+              int(np.log10(np.amin(data_source["particle_mass"].in_units("Msun"))))
+        if which_limit is "max_mass":
+            self.log_mass_max = \
+              int(np.log10(np.amax(data_source["particle_mass"].in_units("Msun"))))+1
+    
+    """
+    Here's where we create the halo mass functions from simulated halos
+    """
+    def create_sim_hmf(self):
+        data_source = self.halos_ds.all_data()
+        # We're going to use indices to count the number of halos above a given mass
+        masses_sim = np.sort(data_source["particle_mass"].in_units("Msun"))
+        # Determine the size of the simulation volume in comoving Mpc**3
+        sim_volume = self.halos_ds.domain_width.in_units('Mpccm').prod()
+        n_cumulative_sim = np.arange(len(masses_sim),0,-1)
+        # We don't want repeated halo masses, and the unique indices tell us which values 
+        # correspond to distinct halo masses.
+        self.masses_sim, unique_indices = np.unique(masses_sim, return_index=True)
+        # Now make this an actual number density of halos as a function of mass.
+        self.n_cumulative_sim = n_cumulative_sim[unique_indices]/sim_volume
+        # masses_sim and n_cumulative_sim are now set, but remember that the log10 quantities
+        # are what is usually plotted for a halo mass function.
+
+    def write_out(self, prefix='HMF', analytic=True, simulated=True):
         """
         Writes out the halo mass functions to file(s) with prefix *prefix*.
         """
-        # First the fit file.
-        if fit:
-            fitname = prefix + '-fit.dat'
-            fp = self.comm.write_on_root(fitname)
-            line = \
-            """#Columns:
-#1. log10 of mass (Msolar, NOT Msolar/h)
-#2. mass (Msolar/h)
-#3. (dn/dM)*dM (differential number density of haloes, per Mpc^3 (NOT h^3/Mpc^3)
-#4. cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-"""
-            fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dn_M_z[i], self.nofmz_cum[i])
+        # First the analytic file, check that analytic fit exists and was requested
+        if analytic:
+            if self.make_analytic:
+                fitname = prefix + '-analytic.dat'
+                fp = open(fitname, "w")
+                line = \
+                "#Columns:\n" + \
+                "#1. mass (M_solar)\n" + \
+                "#2. cumulative number density of halos [comoving Mpc^-3]\n" + \
+                "#3. (dn/dM)*dM (differential number density of halos) [comoving Mpc^-3]\n"
                 fp.write(line)
-            fp.close()
-        if self.mode == 'haloes' and haloes:
-            haloname = prefix + '-haloes.dat'
-            fp = self.comm.write_on_root(haloname)
-            line = \
-            """#Columns:
-#1. log10 of mass (Msolar, NOT Msolar/h)
-#2. mass (Msolar/h)
-#3. cumulative number density of haloes (per Mpc^3, NOT h^3/Mpc^3)
-"""
-            fp.write(line)
-            for i in xrange(self.logmassarray.size - 1):
-                line = "%e\t%e\t%e\n" % (self.logmassarray[i], self.massarray[i],
-                self.dis[i])
+                for i in xrange(self.masses_analytic.size - 1):
+                    line = "%e\t%e\t%e\n" % (self.masses_analytic[i],
+                    self.n_cumulative_analytic[i], 
+                    self.dndM_dM_analytic[i])
+                    fp.write(line)
+                fp.close()
+            # If the analytic halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The analytic halo mass function was not created and cannot be " +
+                              "written out! Specify its creation with " +
+                              "HaloMassFcn(make_analytic=True, other_args) when creating the " +
+                              "HaloMassFcn object.")
+        # Write out the simulated mass function if it exists and was requested
+        if simulated:
+            if self.make_simulated:
+                haloname = prefix + '-simulated.dat'
+                fp = open(haloname, "w")
+                line = \
+                "#Columns:\n" + \
+                "#1. mass [Msun]\n" + \
+                "#2. cumulative number density of halos [comoving Mpc^-3]\n"
                 fp.write(line)
-            fp.close()
-        
-    def read_haloes(self):
-        """
-        Read in the virial masses of the haloes.
-        """
-        mylog.info("Reading halo masses from %s" % self.halo_file)
-        f = open(self.halo_file,'r')
-        line = f.readline()
-        if line == "":
-            self.haloes = np.array([])
-            return
-        while line[0] == '#':
-            line = f.readline()
-        self.haloes = []
-        while line:
-            line = line.split()
-            mass = float(line[self.mass_column])
-            if mass > 0:
-                self.haloes.append(float(line[self.mass_column]))
-            line = f.readline()
-        f.close()
-        self.haloes = np.array(self.haloes)
-
-    def bin_haloes(self):
-        """
-        With the list of virial masses, find the halo mass function.
-        """
-        bins = np.logspace(self.log_mass_min,
-            self.log_mass_max,self.num_sigma_bins)
-        avgs = (bins[1:]+bins[:-1])/2.
-        dis, bins = np.histogram(self.haloes,bins)
-        # add right to left
-        for i,b in enumerate(dis):
-            dis[self.num_sigma_bins-i-3] += dis[self.num_sigma_bins-i-2]
-            if i == (self.num_sigma_bins - 3): break
-
-        self.dis = dis  / (self.ds.domain_width * self.ds.units["mpccm"]).prod()
+                for i in xrange(self.masses_sim.size - 1):
+                    line = "%e\t%e\n" % (self.masses_sim[i], 
+                    self.n_cumulative_sim[i])
+                    fp.write(line)
+                fp.close()
+            # If the simulated halo mass function wasn't created, warn the user
+            else:
+                mylog.warning("The simulated halo mass function was not created and cannot " +
+                              "be written out! Specify its creation by providing a loaded " +
+                              "halo dataset with HaloMassFcn(ds_halos=loaded_halo_dataset, " +
+                              "other_args) when creating the HaloMassFcn object.")
 
     def sigmaM(self):
         """
@@ -223,10 +339,8 @@
         
         Outputs: two columns of data containing the following information:
 
-         1) log mass (Msolar)
-         2) mass (Msolar/h)
-         3) Radius (comoving Mpc/h)
-         4) sigma (normalized) using Msun/h as the input
+         1) mass (Msolar/h)
+         2) sigma (normalized) using Msun/h as the input
          
          The arrays output are used later.
         """
@@ -239,24 +353,21 @@
             mylog.error("You should probably fix your cosmology parameters!")
 
         # output arrays
-        # 1) log10 of mass (Msolar, NOT Msolar/h)
-        self.Rarray = np.empty(self.num_sigma_bins,dtype='float64')
-        # 2) mass (Msolar/h)
-        self.logmassarray = np.empty(self.num_sigma_bins, dtype='float64')
-        # 3) spatial scale corresponding to that radius (Mpc/h)
-        self.massarray = np.empty(self.num_sigma_bins, dtype='float64')
-        # 4) sigma(M, z=0, where mass is in Msun/h)
+        # 1) mass (M_solar/h), changed to M_solar at output
+        self.masses_analytic = np.empty(self.num_sigma_bins, dtype='float64')
+        # 2) sigma(M, z=0, where mass is in Msun/h)
         self.sigmaarray = np.empty(self.num_sigma_bins, dtype='float64')
 
         # get sigma_8 normalization
         R = 8.0;  # in units of Mpc/h (comoving)
 
         sigma8_unnorm = math.sqrt(self.sigma_squared_of_R(R));
-        sigma_normalization = self.sigma8input / sigma8_unnorm;
+        sigma_normalization = self.sigma8 / sigma8_unnorm;
 
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-                rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2,
+                          'g/cm**3').in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()       
 
         # spacing in mass of our sigma calculation
         dm = (float(self.log_mass_max) - self.log_mass_min)/self.num_sigma_bins;
@@ -280,32 +391,31 @@
     
             R = thisradius; # h^-1 Mpc (comoving)
     
-            self.Rarray[i] = thisradius;  # h^-1 Mpc (comoving)
-            self.logmassarray[i] = thislogmass;  # Msun (NOT Msun/h)
-            self.massarray[i] = thismass;  # Msun/h
+            self.masses_analytic[i] = thismass;  # Msun/h
     
             # get normalized sigma(R)
             self.sigmaarray[i] = math.sqrt(self.sigma_squared_of_R(R)) * sigma_normalization;
             # All done!
 
     def dndm(self):
-        
         # constants - set these before calling any functions!
         # rho0 in units of h^2 Msolar/Mpc^3
-        rho0 = self.omega_matter0 * \
-            rho_crit_g_cm3_h2 * cm_per_mpc**3 / mass_sun_cgs
+        rho0 = YTQuantity(self.omega_matter0 * rho_crit_g_cm3_h2 * self.hubble0**2, 
+                          'g/cm**3').in_units('Msun/Mpc**3')
+        rho0 = rho0.value.item()
+
         self.delta_c0 = 1.69;  # critical density for turnaround (Press-Schechter)
         
-        nofmz_cum = 0.0;  # keep track of cumulative number density
+        n_cumulative_analytic = 0.0;  # keep track of cumulative number density
         
         # Loop over masses, going BACKWARD, and calculate dn/dm as well as the 
         # cumulative mass function.
         
         # output arrays
         # 5) (dn/dM)*dM (differential number density of halos, per Mpc^3 (NOT h^3/Mpc^3)
-        self.dn_M_z = np.empty(self.num_sigma_bins, dtype='float64')
+        self.dndM_dM_analytic = np.empty(self.num_sigma_bins, dtype='float64')
         # 6) cumulative number density of halos (per Mpc^3, NOT h^3/Mpc^3)
-        self.nofmz_cum = np.zeros(self.num_sigma_bins, dtype='float64')
+        self.n_cumulative_analytic = np.zeros(self.num_sigma_bins, dtype='float64')
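+
+        # Note: the loop below implements the standard mass-function form
+        #   (dn/dM) dM = f(sigma) * (rho0 / M) * |d ln(sigma)/dM| * dM,
+        # where -(1/sigma) * (dsigma/dM) = |d ln(sigma)/dM| because sigma(M)
+        # decreases monotonically with mass.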
         
         for j in xrange(self.num_sigma_bins - 1):
             i = (self.num_sigma_bins - 2) - j
@@ -313,25 +423,25 @@
             thissigma = self.sigmaof_M_z(i, self.this_redshift);
             nextsigma = self.sigmaof_M_z(i+1, self.this_redshift);
             
-            # calc dsigmadm - has units of h (since massarray has units of h^-1)
-            dsigmadm = (nextsigma-thissigma) / (self.massarray[i+1] - self.massarray[i]);
+            # calc dsigmadm - has units of h (since masses_analytic has units of h^-1)
+            dsigmadm = (nextsigma-thissigma) / (self.masses_analytic[i+1] - self.masses_analytic[i]);
 
             # calculate dn(M,z) (dn/dM * dM)
             # this has units of h^3 since rho0 has units of h^2, dsigmadm
-            # has units of h, and massarray has units of h^-1
-            dn_M_z = -1.0 / thissigma * dsigmadm * rho0 / self.massarray[i] * \
-            self.multiplicityfunction(thissigma)*(self.massarray[i+1] - self.massarray[i]);
+            # has units of h, and masses_analytic has units of h^-1
+            dndM_dM_analytic = -1.0 / thissigma * dsigmadm * rho0 / self.masses_analytic[i] * \
+            self.multiplicityfunction(thissigma)*(self.masses_analytic[i+1] - self.masses_analytic[i]);
 
             # scale by h^3 to get rid of all factors of h
-            dn_M_z *= math.pow(self.hubble0, 3.0);
+            dndM_dM_analytic *= math.pow(self.hubble0, 3.0);
             
             # keep track of cumulative number density
-            if dn_M_z > 1.0e-20:
-                nofmz_cum += dn_M_z;
+            if dndM_dM_analytic > 1.0e-20:
+                n_cumulative_analytic += dndM_dM_analytic;
             
             # Store this.
-            self.nofmz_cum[i] = nofmz_cum
-            self.dn_M_z[i] = dn_M_z
+            self.n_cumulative_analytic[i] = n_cumulative_analytic
+            self.dndM_dM_analytic[i] = dndM_dM_analytic
         
 
     def sigma_squared_of_R(self, R):

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -18,7 +18,9 @@
     requires_ds, \
     small_patch_amr, \
     big_patch_amr, \
-    data_dir_load
+    data_dir_load, \
+    AnalyticHaloMassFunctionTest, \
+    SimulatedHaloMassFunctionTest
 from yt.frontends.enzo.api import EnzoDataset
 
 _fields = ("temperature", "density", "velocity_magnitude",
@@ -69,6 +71,19 @@
         test_galaxy0030.__name__ = test.description
         yield test
 
+enzotiny = "enzo_tiny_cosmology/DD0046/DD0046"
+ at requires_ds(enzotiny)
+def test_simulated_halo_mass_function():
+    ds = data_dir_load(enzotiny)
+    for finder in ["fof", "hop"]:
+        yield SimulatedHaloMassFunctionTest(ds, finder)
+
+ at requires_ds(enzotiny)
+def test_analytic_halo_mass_function():
+    ds = data_dir_load(enzotiny)
+    for fit in range(1, 6):
+        yield AnalyticHaloMassFunctionTest(ds, fit)
+
 ecp = "enzo_cosmology_plus/DD0046/DD0046"
 @requires_ds(ecp, big_data=True)
 def test_ecp():

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -576,6 +576,55 @@
         for newc, oldc in zip(new_result["children"], old_result["children"]):
             assert(newp == oldp)
 
+class SimulatedHaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "SimulatedHaloMassFunction"
+    _attrs = ("finder",)
+
+    def __init__(self, ds_fn, finder):
+        super(SimulatedHaloMassFunctionTest, self).__init__(ds_fn)
+        self.finder = finder
+    
+    def run(self):
+        from yt.analysis_modules.halo_analysis.api import HaloCatalog
+        from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
+        hc = HaloCatalog(data_ds=self.ds, finder_method=self.finder)
+        hc.create()
+        
+        hmf = HaloMassFcn(halos_ds=hc.halos_ds)
+        result = np.empty((2, hmf.masses_sim.size))
+        result[0] = hmf.masses_sim.d
+        result[1] = hmf.n_cumulative_sim.d
+        return result
+
+    def compare(self, new_result, old_result):
+        err_msg = ("Simulated halo mass functions not equation for " +
+                   "%s halo finder.") % self.finder
+        assert_equal(new_result, old_result,
+                     err_msg=err_msg, verbose=True)
+
+class AnalyticHaloMassFunctionTest(AnswerTestingTest):
+    _type_name = "AnalyticHaloMassFunction"
+    _attrs = ("fitting_function",)
+
+    def __init__(self, ds_fn, fitting_function):
+        super(AnalyticHaloMassFunctionTest, self).__init__(ds_fn)
+        self.fitting_function = fitting_function
+    
+    def run(self):
+        from yt.analysis_modules.halo_mass_function.api import HaloMassFcn
+        hmf = HaloMassFcn(simulation_ds=self.ds,
+                          fitting_function=self.fitting_function)
+        result = np.empty((2, hmf.masses_analytic.size))
+        result[0] = hmf.masses_analytic.d
+        result[1] = hmf.n_cumulative_analytic.d
+        return result
+
+    def compare(self, new_result, old_result):
+        err_msg = ("Analytic halo mass functions not equation for " +
+                   "fitting function %d.") % self.fitting_function
+        assert_equal(new_result, old_result,
+                     err_msg=err_msg, verbose=True)
+
 def compare_image_lists(new_result, old_result, decimals):
     fns = ['old.png', 'new.png']
     num_images = len(old_result)
@@ -677,7 +726,7 @@
         return comp_imgs
     def compare(self, new_result, old_result):
         compare_image_lists(new_result, old_result, self.decimals)
-        
+
 
 def requires_ds(ds_fn, big_data = False, file_check = False):
     def ffalse(func):

diff -r db184f28bbac06e15f81292ec306ed0b7f102a69 -r b2a39372ea82bfa7a22b78af7fddfe6137073a8c yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -97,6 +97,8 @@
 # flux
 jansky_cgs = 1.0e-23
 # Cosmological constants
+# Calculated with H = 100 km/s/Mpc, value given in units of h^2 g cm^-3
+# Multiply by h^2 to get the critical density in units of g cm^-3
 rho_crit_g_cm3_h2 = 1.8788e-29
 primordial_H_mass_fraction = 0.76

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

