[yt-svn] commit/yt: 3 new changesets

Bitbucket commits-noreply at bitbucket.org
Fri Feb 15 13:37:41 PST 2013


3 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/3dfcccb39e3e/
changeset:   3dfcccb39e3e
branch:      stable
user:        MatthewTurk
date:        2013-02-14 19:59:16
summary:     Merging to stable from development branch, in preparation for 2.5 release.
affected #:  235 files

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 .hgchurn
--- /dev/null
+++ b/.hgchurn
@@ -0,0 +1,11 @@
+stephenskory at yahoo.com = s at skory.us
+"Stephen Skory stephenskory at yahoo.com" = s at skory.us
+yuan at astro.columbia.edu = bear0980 at gmail.com
+juxtaposicion at gmail.com = cemoody at ucsc.edu
+chummels at gmail.com = chummels at astro.columbia.edu
+jwise at astro.princeton.edu = jwise at physics.gatech.edu
+atmyers = atmyers at berkeley.edu
+sam.skillman at gmail.com = samskillman at gmail.com
+casey at thestarkeffect.com = caseywstark at gmail.com
+chiffre = chiffre at posteo.de
+Christian Karch = chiffre at posteo.de

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,6 +4,7 @@
 freetype.cfg
 hdf5.cfg
 png.cfg
+yt_updater.log
 yt/frontends/ramses/_ramses_reader.cpp
 yt/utilities/amr_utils.c
 yt/utilities/kdtree/forthonf2c.h
@@ -26,6 +27,7 @@
 yt/utilities/lib/RayIntegrators.c
 yt/utilities/lib/VolumeIntegrator.c
 yt/utilities/lib/grid_traversal.c
+yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
 syntax: glob

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,6 +5152,10 @@
 0000000000000000000000000000000000000000 svn.993
 fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
 0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
 ca6e536c15a60070e6988fd472dc771a1897e170 yt-2.0
 882c41eed5dd4a3cdcbb567bcb79b833e46b1f42 yt-2.0.1
 a2b3521b1590c25029ca0bc602ad6cb7ae7b8ba2 yt-2.1

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README* 
+recursive-include yt *.pyx *.pxd *.hh *.h README*

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 distribute_setup.py
--- a/distribute_setup.py
+++ b/distribute_setup.py
@@ -14,11 +14,14 @@
 This file can also be run as a script to install or upgrade setuptools.
 """
 import os
+import shutil
 import sys
 import time
 import fnmatch
 import tempfile
 import tarfile
+import optparse
+
 from distutils import log
 
 try:
@@ -46,7 +49,7 @@
             args = [quote(arg) for arg in args]
         return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
 
-DEFAULT_VERSION = "0.6.21"
+DEFAULT_VERSION = "0.6.32"
 DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
 SETUPTOOLS_FAKED_VERSION = "0.6c11"
 
@@ -63,7 +66,7 @@
 """ % SETUPTOOLS_FAKED_VERSION
 
 
-def _install(tarball):
+def _install(tarball, install_args=()):
     # extracting the tarball
     tmpdir = tempfile.mkdtemp()
     log.warn('Extracting in %s', tmpdir)
@@ -81,11 +84,14 @@
 
         # installing
         log.warn('Installing Distribute')
-        if not _python_cmd('setup.py', 'install'):
+        if not _python_cmd('setup.py', 'install', *install_args):
             log.warn('Something went wrong during the installation.')
             log.warn('See the error message above.')
+            # exitcode will be 2
+            return 2
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
 
 
 def _build_egg(egg, tarball, to_dir):
@@ -110,6 +116,7 @@
 
     finally:
         os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
     # returning the result
     log.warn(egg)
     if not os.path.exists(egg):
@@ -144,7 +151,7 @@
         except ImportError:
             return _do_download(version, download_base, to_dir, download_delay)
         try:
-            pkg_resources.require("distribute>="+version)
+            pkg_resources.require("distribute>=" + version)
             return
         except pkg_resources.VersionConflict:
             e = sys.exc_info()[1]
@@ -167,6 +174,7 @@
         if not no_fake:
             _create_fake_setuptools_pkg_info(to_dir)
 
+
 def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
                         to_dir=os.curdir, delay=15):
     """Download distribute from a specified location and return its filename
@@ -203,6 +211,7 @@
                 dst.close()
     return os.path.realpath(saveto)
 
+
 def _no_sandbox(function):
     def __no_sandbox(*args, **kw):
         try:
@@ -227,6 +236,7 @@
 
     return __no_sandbox
 
+
 def _patch_file(path, content):
     """Will backup the file then patch it"""
     existing_content = open(path).read()
@@ -245,15 +255,18 @@
 
 _patch_file = _no_sandbox(_patch_file)
 
+
 def _same_content(path, content):
     return open(path).read() == content
 
+
 def _rename_path(path):
     new_name = path + '.OLD.%s' % time.time()
-    log.warn('Renaming %s into %s', path, new_name)
+    log.warn('Renaming %s to %s', path, new_name)
     os.rename(path, new_name)
     return new_name
 
+
 def _remove_flat_installation(placeholder):
     if not os.path.isdir(placeholder):
         log.warn('Unkown installation at %s', placeholder)
@@ -267,7 +280,7 @@
         log.warn('Could not locate setuptools*.egg-info')
         return
 
-    log.warn('Removing elements out of the way...')
+    log.warn('Moving elements out of the way...')
     pkg_info = os.path.join(placeholder, file)
     if os.path.isdir(pkg_info):
         patched = _patch_egg_dir(pkg_info)
@@ -289,11 +302,13 @@
 
 _remove_flat_installation = _no_sandbox(_remove_flat_installation)
 
+
 def _after_install(dist):
     log.warn('After install bootstrap.')
     placeholder = dist.get_command_obj('install').install_purelib
     _create_fake_setuptools_pkg_info(placeholder)
 
+
 def _create_fake_setuptools_pkg_info(placeholder):
     if not placeholder or not os.path.exists(placeholder):
         log.warn('Could not find the install location')
@@ -307,7 +322,11 @@
         return
 
     log.warn('Creating %s', pkg_info)
-    f = open(pkg_info, 'w')
+    try:
+        f = open(pkg_info, 'w')
+    except EnvironmentError:
+        log.warn("Don't have permissions to write %s, skipping", pkg_info)
+        return
     try:
         f.write(SETUPTOOLS_PKG_INFO)
     finally:
@@ -321,7 +340,10 @@
     finally:
         f.close()
 
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+    _create_fake_setuptools_pkg_info
+)
+
 
 def _patch_egg_dir(path):
     # let's check if it's already patched
@@ -343,6 +365,7 @@
 
 _patch_egg_dir = _no_sandbox(_patch_egg_dir)
 
+
 def _before_install():
     log.warn('Before install bootstrap.')
     _fake_setuptools()
@@ -351,7 +374,7 @@
 def _under_prefix(location):
     if 'install' not in sys.argv:
         return True
-    args = sys.argv[sys.argv.index('install')+1:]
+    args = sys.argv[sys.argv.index('install') + 1:]
     for index, arg in enumerate(args):
         for option in ('--root', '--prefix'):
             if arg.startswith('%s=' % option):
@@ -359,7 +382,7 @@
                 return location.startswith(top_dir)
             elif arg == option:
                 if len(args) > index:
-                    top_dir = args[index+1]
+                    top_dir = args[index + 1]
                     return location.startswith(top_dir)
         if arg == '--user' and USER_SITE is not None:
             return location.startswith(USER_SITE)
@@ -376,11 +399,14 @@
         return
     ws = pkg_resources.working_set
     try:
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
-                                  replacement=False))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools', replacement=False)
+            )
     except TypeError:
         # old distribute API
-        setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+        setuptools_dist = ws.find(
+            pkg_resources.Requirement.parse('setuptools')
+        )
 
     if setuptools_dist is None:
         log.warn('No setuptools distribution found')
@@ -414,7 +440,7 @@
         res = _patch_egg_dir(setuptools_location)
         if not res:
             return
-    log.warn('Patched done.')
+    log.warn('Patching complete.')
     _relaunch()
 
 
@@ -422,7 +448,9 @@
     log.warn('Relaunching...')
     # we have to relaunch the process
     # pip marker to avoid a relaunch bug
-    if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+    _cmd1 = ['-c', 'install', '--single-version-externally-managed']
+    _cmd2 = ['-c', 'install', '--record']
+    if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
         sys.argv[0] = 'setup.py'
     args = [sys.executable] + sys.argv
     sys.exit(subprocess.call(args))
@@ -448,7 +476,7 @@
             # Extract directories with a safe mode.
             directories.append(tarinfo)
             tarinfo = copy.copy(tarinfo)
-            tarinfo.mode = 448 # decimal for oct 0700
+            tarinfo.mode = 448  # decimal for oct 0700
         self.extract(tarinfo, path)
 
     # Reverse sort directories.
@@ -475,11 +503,39 @@
                 self._dbg(1, "tarfile: %s" % e)
 
 
-def main(argv, version=DEFAULT_VERSION):
+def _build_install_args(options):
+    """
+    Build the arguments to 'python setup.py install' on the distribute package
+    """
+    install_args = []
+    if options.user_install:
+        if sys.version_info < (2, 6):
+            log.warn("--user requires Python 2.6 or later")
+            raise SystemExit(1)
+        install_args.append('--user')
+    return install_args
+
+def _parse_args():
+    """
+    Parse the command line for options
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        '--user', dest='user_install', action='store_true', default=False,
+        help='install in user site package (requires Python 2.6 or later)')
+    parser.add_option(
+        '--download-base', dest='download_base', metavar="URL",
+        default=DEFAULT_URL,
+        help='alternative URL from where to download the distribute package')
+    options, args = parser.parse_args()
+    # positional arguments are ignored
+    return options
+
+def main(version=DEFAULT_VERSION):
     """Install or upgrade setuptools and EasyInstall"""
-    tarball = download_setuptools()
-    _install(tarball)
-
+    options = _parse_args()
+    tarball = download_setuptools(download_base=options.download_base)
+    return _install(tarball, _build_install_args(options))
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    sys.exit(main())
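
For context on the bootstrap changes above: main() now parses --user and --download-base with optparse and returns the exit code from _install(). A minimal sketch (not part of this changeset) of driving the updated script from Python, assuming distribute_setup.py sits in the working directory:

    # Minimal sketch, not commit content: run the updated bootstrap with its new
    # --user flag and surface the exit code that main() now returns.
    # Note: --user requires Python 2.6+, as enforced by _build_install_args above.
    import subprocess
    import sys

    rc = subprocess.call([sys.executable, "distribute_setup.py", "--user"])
    if rc != 0:
        raise SystemExit("distribute bootstrap failed with exit code %d" % rc)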

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -13,12 +13,12 @@
  * Do not use nested classes unless you have a very good reason to, such as
    requiring a namespace or class-definition modification.  Classes should live
    at the top level.  __metaclass__ is exempt from this.
- * Do not use unecessary parenthesis in conditionals.  if((something) and
+ * Do not use unnecessary parenthesis in conditionals.  if((something) and
    (something_else)) should be rewritten as if something and something_else.
    Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do 
    "a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
-   be "na.multiply(a, 3, a)".
+   be "np.multiply(a, 3, a)".
  * In general, avoid all double-underscore method names: __something is usually
    unnecessary.
  * When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
 
    from yt.visualization.plot_collection import PlotCollection
 
- * Numpy is to be imported as "na" not "np".  While this may change in the
-   future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
  * Do not use too many keyword arguments.  If you have a lot of keyword
    arguments, then you are doing too much in __init__ and not enough via
    parameter setting.
@@ -51,7 +50,7 @@
    replace the old class.  Too many options makes for a confusing user
    experience.
  * Parameter files are a last resort.
- * The usage of the **kwargs construction should be avoided.  If they cannoted
+ * The usage of the **kwargs construction should be avoided.  If they cannot
    be avoided, they must be explained, even if they are only to be passed on to
    a nested function.
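
To make the two in-place idioms mentioned in the styleguide concrete, a short illustration (added for clarity, not part of the styleguide diff itself):

    # In-place idioms the styleguide recommends: reshape via the shape attribute and
    # multiply into the existing buffer instead of allocating a copy.
    import numpy as np

    a = np.arange(12, dtype="float64")
    a.shape = (3, 4)        # no new allocation, unlike a = a.reshape(3, 4)
    np.multiply(a, 3, a)    # scales a in place; a = a * 3 would create a temporary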
 

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,8 +10,6 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.
 #
-# NOTE: If you have trouble with wxPython, set INST_WXPYTHON=0 .
-#
 # And, feel free to drop me a line: matthewturk at gmail.com
 #
 
@@ -19,11 +17,16 @@
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="stable" # This is the branch to which we will forcibly update.
 
+if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+then
+    DEST_DIR=${YT_DEST}
+fi
+
 # Here's where you put the HDF5 path if you like; otherwise it'll download it
 # and install it on its own
 #HDF5_DIR=
 
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
 # This one turns on gfortran manually:
 #NUMPY_ARGS="--fcompiler=gnu95"
 # If you absolutely can't get the fortran to work, try this:
@@ -43,6 +46,8 @@
 INST_PYX=0      # Install PyX?  Sometimes PyX can be problematic without a
                 # working TeX installation.
 INST_0MQ=1      # Install 0mq (for IPython) and affiliated bindings?
+INST_ROCKSTAR=0 # Install the Rockstar halo finder?
+INST_SCIPY=0    # Install scipy?
 
 # If you've got yt some other place, set this to point to it.
 YT_DIR=""
@@ -154,18 +159,6 @@
         echo "   $ module swap PE-pgi PE-gnu"
         echo
     fi
-    if [ "${MYHOSTLONG%%ranger}" != "${MYHOSTLONG}" ]
-    then
-        echo "Looks like you're on Ranger."
-        echo
-        echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
-        echo "These commands should take care of that for you:"
-        echo
-        echo "   $ module unload mvapich2"
-        echo "   $ module swap pgi gcc"
-        echo "   $ module load mvapich2"
-        echo
-    fi
     if [ "${MYHOST##steele}" != "${MYHOST}" ]
     then
         echo "Looks like you're on Steele."
@@ -183,24 +176,53 @@
         echo
         echo "NOTE: you must have the Xcode command line tools installed."
         echo
-        echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
-        echo "website"
+	echo "The instructions for obtaining these tools varies according"
+	echo "to your exact OS version.  On older versions of OS X, you"
+	echo "must register for an account on the apple developer tools"
+	echo "website: https://developer.apple.com/downloads to obtain the"
+	echo "download link."
+	echo 
+	echo "We have gathered some additional instructions for each"
+	echo "version of OS X below. If you have trouble installing yt"
+	echo "after following these instructions, don't hesitate to contact"
+	echo "the yt user's e-mail list."
+	echo
+	echo "You can see which version of OSX you are running by clicking"
+	echo "'About This Mac' in the apple menu on the left hand side of"
+	echo "menu bar.  We're assuming that you've installed all operating"
+	echo "system updates; if you have an older version, we suggest"
+	echo "running software update and installing all available updates."
+	echo 
+        echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the" 
+	echo "Apple developer tools website."
         echo
-        echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
-        echo "website"
+        echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+	echo "developer tools website.  You can either download the"
+	echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+	echo "Software Update to update to XCode 3.2.6 or" 
+	echo "alternatively, you can download the Xcode 3.2.6/iOS SDK" 
+	echo "bundle (4.1 GB)."
         echo
-        echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
-        echo "alternatively download the Xcode command line tools from"
-        echo "the mac developer tools website"
+        echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+	echo "(search for Xcode)."
+        echo "Alternatively, download the Xcode command line tools from"
+        echo "the Apple developer tools website."
         echo
-        echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
-        echo "Leopard) or newer.  If you do, please set the following"
-        echo "environment variables, remove any broken installation tree, and"
-        echo "re-run this script verbatim."
+	echo "OS X 10.8.2: download Xcode 4.6 from the mac app store."
+	echo "(search for Xcode)."
+	echo "Additionally, you will have to manually install the Xcode"
+	echo "command line tools, see:" 
+	echo "http://stackoverflow.com/questions/9353444"
+	echo "Alternatively, download the Xcode command line tools from"
+	echo "the Apple developer tools website."
+	echo
+        echo "NOTE: It's possible that the installation will fail, if so," 
+	echo "please set the following environment variables, remove any" 
+	echo "broken installation tree, and re-run this script verbatim."
         echo
         echo "$ export CC=gcc-4.2"
         echo "$ export CXX=g++-4.2"
-        echo
+	echo
         OSX_VERSION=`sw_vers -productVersion`
         if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
         then
@@ -208,6 +230,27 @@
             MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
         fi
     fi
+    if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+    then
+        echo "Looks like you're on an OpenSUSE-compatible machine."
+        echo
+        echo "You need to have these packages installed:"
+        echo
+        echo "  * devel_C_C++"
+        echo "  * libopenssl-devel"
+        echo "  * libuuid-devel"
+        echo "  * zip"
+        echo "  * gcc-c++"
+        echo
+        echo "You can accomplish this by executing:"
+        echo
+        echo "$ sudo zypper install -t pattern devel_C_C++"
+        echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+        echo
+        echo "I am also setting special configure arguments to Python to"
+        echo "specify control lib/lib64 issues."
+        PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+    fi
     if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
     then
         echo "Looks like you're on an Ubuntu-compatible machine."
@@ -220,11 +263,38 @@
         echo "  * libncurses5-dev"
         echo "  * zip"
         echo "  * uuid-dev"
+        echo "  * libfreetype6-dev"
+        echo "  * tk-dev"
         echo
         echo "You can accomplish this by executing:"
         echo
-        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev"
+        echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
         echo
+        echo
+        echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+        echo " so you can use yt without the activate script, you might "
+        echo " want to consider turning off LIBZ and FREETYPE in this"
+        echo " install script by editing this file and setting"
+        echo
+        echo " INST_ZLIB=0"
+        echo " INST_FTYPE=0"
+        echo 
+        echo " to avoid conflicts with other command-line programs "
+        echo " (like eog and evince, for example)."
+    fi
+    if [ $INST_SCIPY -eq 1 ]
+    then
+	echo
+	echo "Looks like you've requested that the install script build SciPy."
+	echo
+	echo "If the SciPy build fails, please uncomment one of the the lines"
+	echo "at the top of the install script that sets NUMPY_ARGS, delete"
+	echo "any broken installation tree, and re-run the install script"
+	echo "verbatim."
+	echo
+	echo "If that doesn't work, don't hesitate to ask for help on the yt"
+	echo "user's mailing list."
+	echo
     fi
     if [ ! -z "${CFLAGS}" ]
     then
@@ -232,7 +302,7 @@
         echo "******************************************"
         echo "**                                      **"
         echo "**    Your CFLAGS is not empty.         **"
-        echo "**    This can beak h5py compilation.   **"
+        echo "**    This can break h5py compilation.  **"
         echo "**                                      **"
         echo "******************************************"
         echo "******************************************"
@@ -284,6 +354,10 @@
 get_willwont ${INST_PYX}
 echo "be installing PyX"
 
+printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
+get_willwont ${INST_SCIPY}
+echo "be installing scipy"
+
 printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
@@ -385,6 +459,14 @@
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
 
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
 ORIG_PWD=`pwd`
 
 if [ -z "${DEST_DIR}" ]
@@ -393,33 +475,46 @@
     exit 1
 fi
 
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
 # Now we dump all our SHA512 files out.
 
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec  Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff  PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
+echo 'eda1b8090e5e21e7e039ef4dd03de186a7b416df9d5a4e4422abeeb4d51383b9a6858e1ac4902d8e5010f661b295bbb2452c43c8738be668379b4eb4835d0f61  Cython-0.17.1.tar.gz' > Cython-0.17.1.tar.gz.sha512
 echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478  PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e  Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
+echo 'b981f8464575bb24c297631c87a3b9172312804a0fc14ce1fa7cb41ce2b0d2fd383cd1c816d6e10c36467d18bf9492d6faf557c81c04ff3b22debfa93f30ad0b  Python-2.7.3.tgz' > Python-2.7.3.tgz.sha512
 echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338  bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de  freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1  h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e  hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
+echo 'b3290c498191684781ca5286ab454eb1bd045e8d894f5b86fb86beb88f174e22ac3ab008fb02d6562051d9fa6a9593920cab433223f6d5473999913223b8e183  h5py-2.1.0.tar.gz' > h5py-2.1.0.tar.gz.sha512
+echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
 echo 'dbefad00fa34f4f21dca0f1e92e95bd55f1f4478fa0095dcf015b4d06f0c823ff11755cd777e507efaf1c9098b74af18f613ec9000e5c3a5cc1c7554fb5aefb8  libpng-1.5.12.tar.gz' > libpng-1.5.12.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff  matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
+echo '5b1a0fb52dcb21ca5f0ab71c8a49550e1e8cf633552ec6598dc43f0b32c03422bf5af65b30118c163231ecdddfd40846909336f16da318959106076e80a3fad0  matplotlib-1.2.0.tar.gz' > matplotlib-1.2.0.tar.gz.sha512
 echo '91693ca5f34934956a7c2c98bb69a5648b2a5660afd2ecf4a05035c5420450d42c194eeef0606d7683e267e4eaaaab414df23f30b34c88219bdd5c1a0f1f66ed  mercurial-2.5.1.tar.gz' > mercurial-2.5.1.tar.gz.sha512
 echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9  numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
 echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474  sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
 echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2  zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo '42021737c93cea513116e6051cff9b803e3f25d6019c74370b42f4c91d1af73e94ac2b7ace85b7565fa3d45b79231079bd48a242531beeafa33c36d7139ce838  ipython-0.13.tar.gz' > ipython-0.13.tar.gz.sha512
+echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
 echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e  zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
 echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af  pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
 echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf  tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-
+echo '1332e3d5465ca249c357314cf15d2a4e5e83a941841021b8f6a17a107dce268a7a082838ade5e8db944ecde6bfb111211ab218aa414ee90aafbb81f1491b3b93  Forthon-0.8.10.tar.gz' > Forthon-0.8.10.tar.gz.sha512
+echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
+echo '73de2c99406a38f85273931597525cec4ebef55b93712adca3b0bfea8ca3fc99446e5d6495817e9ad55cf4d48feb7fb49734675c4cc8938db8d4a5225d30eca7  python-hglib-0.2.tar.gz' > python-hglib-0.2.tar.gz.sha512
+echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
 [ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2 
 [ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
 [ $INST_PNG -eq 1 ] && get_ytproject libpng-1.5.12.tar.gz
@@ -429,15 +524,22 @@
 [ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
 [ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
+[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
+get_ytproject Python-2.7.3.tgz
 get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
+get_ytproject matplotlib-1.2.0.tar.gz
 get_ytproject mercurial-2.5.1.tar.gz
-get_ytproject ipython-0.13.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+get_ytproject ipython-0.13.1.tar.gz
+get_ytproject h5py-2.1.0.tar.gz
+get_ytproject Cython-0.17.1.tar.gz
 get_ytproject reason-js-20120623.zip
-
+get_ytproject Forthon-0.8.10.tar.gz
+get_ytproject nose-1.2.1.tar.gz 
+get_ytproject python-hglib-0.2.tar.gz
+get_ytproject sympy-0.7.2.tar.gz
+get_ytproject rockstar-0.99.6.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
     if [ ! -e bzip2-1.0.5/done ]
@@ -521,11 +623,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.7/done ]
+    if [ ! -e hdf5-1.8.9/done ]
     then
-        [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.7
+        cd hdf5-1.8.9
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -553,12 +655,12 @@
     fi
 fi
 
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e Python-2.7.3/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
-    cd Python-2.7.2
-    ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+    [ ! -e Python-2.7.3 ] && tar xfz Python-2.7.3.tgz
+    cd Python-2.7.3
+    ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -620,7 +722,40 @@
 echo "Installing pip"
 ( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
-do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+if [ $INST_SCIPY -eq 0 ]
+then
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+else
+    if [ ! -e scipy-0.11.0/done ]
+    then
+	if [ ! -e BLAS/done ]
+	then
+	    tar xfz blas.tar.gz
+	    echo "Building BLAS"
+	    cd BLAS
+	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
+	    ar r libfblas.a *.o 1>> ${LOG_FILE}
+	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    rm -rf *.o
+	    touch done
+	    cd ..
+	fi
+	if [ ! -e lapack-3.4.2/done ]
+	then
+	    tar xfz lapack-3.4.2.tar.gz
+	    echo "Building LAPACK"
+	    cd lapack-3.4.2/
+	    cp INSTALL/make.inc.gfortran make.inc
+	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    touch done
+	    cd ..
+	fi
+    fi
+    export BLAS=$PWD/BLAS/libfblas.a
+    export LAPACK=$PWD/lapack-3.4.2/liblapack.a    
+    do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
 then
@@ -641,10 +776,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/matplotlib-1.2.0
+echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.0/setup.cfg
+do_setup_py matplotlib-1.2.0
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -671,16 +806,42 @@
     do_setup_py tornado-2.2
 fi
 
-do_setup_py ipython-0.13
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
+do_setup_py ipython-0.13.1
+do_setup_py h5py-2.1.0
+do_setup_py Cython-0.17.1
+do_setup_py Forthon-0.8.10
+do_setup_py nose-1.2.1
+do_setup_py python-hglib-0.2
+do_setup_py sympy-0.7.2
 [ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
 
+# Now we build Rockstar and set its environment variable.
+if [ $INST_ROCKSTAR -eq 1 ]
+then
+    if [ ! -e Rockstar/done ]
+    then
+        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        echo "Building Rockstar"
+        cd Rockstar
+        ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        cp librockstar.so ${DEST_DIR}/lib
+        ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+        echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
+        touch done
+        cd ..
+    fi
+fi
+
 echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
 MY_PWD=`pwd`
 cd $YT_DIR
 ( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
 
+echo "Building Fortran kD-tree module."
+cd yt/utilities/kdtree
+( make 2>&1 ) 1>> ${LOG_FILE}
+cd ../../..
+
 echo "Installing yt"
 echo $HDF5_DIR > hdf5.cfg
 [ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
@@ -699,7 +860,7 @@
 then
     echo "Cloning a copy of Enzo."
     cd ${DEST_DIR}/src/
-    ${HG_EXEC} clone https://enzo.googlecode.com/hg/ ./enzo-hg-stable
+    ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
     cd $MY_PWD
 fi
 
@@ -784,3 +945,6 @@
 
 print_afterword
 print_afterword >> ${LOG_FILE}
+
+echo "yt dependencies were last updated on" > ${DEST_DIR}/.yt_update
+date >> ${DEST_DIR}/.yt_update

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
 [egg_info]
 #tag_build = .dev
 #tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1
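
Together with the new nose.cfg above, this wires the test runner so that a bare nosetests run discovers tests under yt/, reports detailed errors, skips the answer_testing machinery, and (via setup.cfg) emits an xunit report. A rough Python equivalent, assuming the nose package is installed; the flags simply mirror the options written to these config files:

    # Rough programmatic equivalent of the [nosetests] configuration above; not part
    # of the commit, shown only to spell out what the options do.
    import nose

    nose.main(argv=["nosetests", "--detailed-errors", "--where=yt",
                    "--exclude=answer_testing", "--with-xunit"])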

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 setup.py
--- a/setup.py
+++ b/setup.py
@@ -7,6 +7,7 @@
 import distribute_setup
 distribute_setup.use_setuptools()
 
+from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
 from numpy.distutils import log
 from distutils import version
@@ -106,10 +107,46 @@
 
 import setuptools
 
-VERSION = "2.4"
+VERSION = "2.5"
 
 if os.path.exists('MANIFEST'): os.remove('MANIFEST')
 
+def get_mercurial_changeset_id(target_dir):
+    """adapted from a script by Jason F. Harris, published at
+
+    http://jasonfharris.com/blog/2010/05/versioning-your-application-with-the-mercurial-changeset-hash/
+
+    """
+    import subprocess
+    import re
+    get_changeset = subprocess.Popen('hg identify -b -i',
+                                     stdout=subprocess.PIPE,
+                                     stderr=subprocess.PIPE,
+                                     shell=True)
+        
+    if (get_changeset.stderr.read() != ""):
+        print "Error in obtaining current changeset of the Mercurial repository"
+        changeset = None
+        
+    changeset = get_changeset.stdout.read().strip()
+    if (not re.search("^[0-9a-f]{12}", changeset)):
+        print "Current changeset of the Mercurial repository is malformed"
+        changeset = None
+
+    return changeset
+
+class my_build_py(build_py):
+    def run(self):
+        # honor the --dry-run flag
+        if not self.dry_run:
+            target_dir = os.path.join(self.build_lib,'yt')
+            src_dir =  os.getcwd() 
+            changeset = get_mercurial_changeset_id(src_dir)
+            self.mkpath(target_dir)
+            with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
+                fobj.write("hg_version = '%s'\n" % changeset)
+
+            build_py.run(self)
 
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
@@ -154,7 +191,11 @@
             'amr adaptivemeshrefinement',
         entry_points={'console_scripts': [
                             'yt = yt.utilities.command_line:run_main',
-                       ]},
+                      ],
+                      'nose.plugins.0.10': [
+                            'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+                      ]
+        },
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",
@@ -162,6 +203,7 @@
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,
+        cmdclass = {'build_py': my_build_py},
         )
     return
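
The my_build_py command added above records the Mercurial changeset in yt/__hg_version__.py at build time. A small sketch, assuming a built or installed copy of yt, of reading that value back at runtime:

    # Sketch, not part of the changeset: recover the changeset recorded by my_build_py.
    try:
        from yt.__hg_version__ import hg_version
    except ImportError:
        # e.g. running from an unbuilt source checkout
        hg_version = None
    changeset = hg_version or "unknown"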
 

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,5 @@
   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
+
+__version__ = "2.5-dev"
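
With the module-level attribute added above, downstream scripts can check the installed release; a trivial usage sketch (illustrative only):

    # Usage sketch: guard a script against running on an unexpected yt release.
    import yt

    if not yt.__version__.startswith("2.5"):
        raise RuntimeError("expected the yt 2.5 series, found %s" % yt.__version__)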

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def voigt(a,u):
     """
@@ -65,15 +65,15 @@
             J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
                       Sep 1990 (better overflow checking)
     """
-    x = na.asarray(u).astype(na.float64)
-    y = na.asarray(a).astype(na.float64)
+    x = np.asarray(u).astype(np.float64)
+    y = np.asarray(a).astype(np.float64)
 
-    w = na.array([0.462243670,   0.286675505,   0.109017206, 
+    w = np.array([0.462243670,   0.286675505,   0.109017206, 
                   0.0248105209,  0.00324377334, 0.000228338636, 
                   7.80255648e-6, 1.08606937e-7, 4.39934099e-10, 
                   2.22939365e-13])
 
-    t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
+    t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771, 
                   2.25497400,  2.78880606,  3.34785457, 3.94476404, 
                   4.60368245,  5.38748089])
 
@@ -94,31 +94,31 @@
     y2 = y * y
 
     # limits are y<1.,  x<4 or y<1.8(x+1),  x>4 (no checking performed)
-    u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+    u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
 
     # Clenshaw's Algorithm
-    bno1 = na.zeros(x.shape)
-    bno2 = na.zeros(x.shape)
-    x1 = na.clip((x / 5.), -na.inf, 1.)
+    bno1 = np.zeros(x.shape)
+    bno2 = np.zeros(x.shape)
+    x1 = np.clip((x / 5.), -np.inf, 1.)
     coef = 4. * x1 * x1 - 2.
     for i in range(33, -1, -1):
         bn = coef * bno1 - bno2 + c[i]
-        bno2 = na.copy(bno1)
-        bno1 = na.copy(bn)
+        bno2 = np.copy(bno1)
+        bno1 = np.copy(bn)
 
     f = x1 * (bn - bno2)
     dno1 = 1. - 2. * x * f
     dno2 = f
 
-    q = na.abs(x) > 5
+    q = np.abs(x) > 5
     if q.any():
-        x14 = na.power(na.clip(x[q], -na.inf, 500.),  14)
-        x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
-        x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
-        x8  = na.power(na.clip(x[q], -na.inf, 50000.), 8)
-        x6  = na.power(na.clip(x[q], -na.inf, 1.e6),   6)
-        x4  = na.power(na.clip(x[q], -na.inf, 1.e9),   4)
-        x2  = na.power(na.clip(x[q], -na.inf, 1.e18),  2)
+        x14 = np.power(np.clip(x[q], -np.inf, 500.),  14)
+        x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+        x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+        x8  = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+        x6  = np.power(np.clip(x[q], -np.inf, 1.e6),   6)
+        x4  = np.power(np.clip(x[q], -np.inf, 1.e9),   4)
+        x2  = np.power(np.clip(x[q], -np.inf, 1.e18),  2)
         dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 + 
                     6.5625 / x8 + 29.53125 / x10 +
                     162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
             if (i % 2) == 1:
                 q = -q
                 yn = yn * y2
-                g = dn.astype(na.float64) * yn
+                g = dn.astype(np.float64) * yn
                 funct = funct + q * g
-                if na.max(na.abs(g / funct)) <= 1.e-8: break
+                if np.max(np.abs(g / funct)) <= 1.e-8: break
 
     k1 = u1 - 1.12837917 * funct
-    k1 = k1.astype(na.float64).clip(0)
+    k1 = k1.astype(np.float64).clip(0)
     return k1
 
 def tau_profile(lam0, fval, gamma, vkms, column_density, 
@@ -191,19 +191,19 @@
     ## create wavelength
     if lambda_bins is None:
         lambda_bins = lam1 + \
-            na.arange(n_lambda, dtype=na.float) * dlambda - \
+            np.arange(n_lambda, dtype=np.float) * dlambda - \
             n_lambda * dlambda / 2    # wavelength vector (angstroms)
     nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
 
     ## tau_0
-    tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+    tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
         column_density * fval / vdop
     tau1 = tau_X * lam1cgs
     tau0 = tau_X * lam0cgs
 
     # dimensionless frequency offset in units of doppler freq
     x = (nua - nu1) / nudop
-    a = gamma / (4 * na.pi * nudop)   # damping parameter 
+    a = gamma / (4 * np.pi * nudop)   # damping parameter 
     phi = voigt(a, x)                 # profile
     tauphi = tau0 * phi               # profile scaled with tau0
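
As a usage sketch for the module updated above (the (a, u) argument order and array handling are taken from the function body in this diff; the call itself is illustrative, not part of the commit):

    # Illustrative call of the Voigt profile helper; a is the damping parameter and u
    # the dimensionless frequency offsets, as in tau_profile above.
    import numpy as np
    from yt.analysis_modules.absorption_spectrum.absorption_line import voigt

    u = np.linspace(-5.0, 5.0, 101)
    phi = voigt(1.0e-3, u)    # profile values, clipped non-negative by the routine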
 

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -24,7 +24,7 @@
 """
 
 import h5py
-import numpy as na
+import numpy as np
 
 from absorption_line import tau_profile
 
@@ -48,7 +48,7 @@
         self.tau_field = None
         self.flux_field = None
         self.spectrum_line_list = None
-        self.lambda_bins = na.linspace(lambda_min, lambda_max, n_lambda)
+        self.lambda_bins = np.linspace(lambda_min, lambda_max, n_lambda)
         self.bin_width = (lambda_max - lambda_min) / float(n_lambda - 1)
         self.line_list = []
         self.continuum_list = []
@@ -114,13 +114,13 @@
             field_data[field] = input[field].value
         input.close()
 
-        self.tau_field = na.zeros(self.lambda_bins.size)
+        self.tau_field = np.zeros(self.lambda_bins.size)
         self.spectrum_line_list = []
 
         self._add_lines_to_spectrum(field_data, use_peculiar_velocity)
         self._add_continua_to_spectrum(field_data, use_peculiar_velocity)
 
-        self.flux_field = na.exp(-self.tau_field)
+        self.flux_field = np.exp(-self.tau_field)
 
         if output_file.endswith('.h5'):
             self._write_spectrum_hdf5(output_file)
@@ -148,20 +148,20 @@
                 delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
             this_wavelength = delta_lambda + continuum['wavelength']
-            right_index = na.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
-            left_index = na.digitize((this_wavelength *
-                                     na.power((tau_min * continuum['normalization'] /
+            right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
+            left_index = np.digitize((this_wavelength *
+                                     np.power((tau_min * continuum['normalization'] /
                                                column_density), (1. / continuum['index']))),
                                     self.lambda_bins).clip(0, self.n_lambda)
 
-            valid_continuua = na.where(((column_density /
+            valid_continuua = np.where(((column_density /
                                          continuum['normalization']) > tau_min) &
                                        (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding continuum feature - %s [%f A]: " % \
                                 (continuum['label'], continuum['wavelength']),
                             valid_continuua.size)
             for i, lixel in enumerate(valid_continuua):
-                line_tau = na.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
+                line_tau = np.power((self.lambda_bins[left_index[lixel]:right_index[lixel]] /
                                      this_wavelength[lixel]), continuum['index']) * \
                                      column_density[lixel] / continuum['normalization']
                 self.tau_field[left_index[lixel]:right_index[lixel]] += line_tau
@@ -184,10 +184,10 @@
                 # include factor of (1 + z) because our velocity is in proper frame.
                 delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
                     field_data['los_velocity'] / speed_of_light_cgs
-            thermal_b = km_per_cm * na.sqrt((2 * boltzmann_constant_cgs *
+            thermal_b = km_per_cm * np.sqrt((2 * boltzmann_constant_cgs *
                                              field_data['Temperature']) /
                                             (amu_cgs * line['atomic_mass']))
-            center_bins = na.digitize((delta_lambda + line['wavelength']),
+            center_bins = np.digitize((delta_lambda + line['wavelength']),
                                       self.lambda_bins)
 
             # ratio of line width to bin width
@@ -201,7 +201,7 @@
                            spectrum_bin_ratio * width_ratio).astype(int).clip(0, self.n_lambda)
 
             # loop over all lines wider than the bin width
-            valid_lines = na.where((width_ratio >= 1.0) &
+            valid_lines = np.where((width_ratio >= 1.0) &
                                    (right_index - left_index > 1))[0]
             pbar = get_pbar("Adding line - %s [%f A]: " % (line['label'], line['wavelength']),
                             valid_lines.size)

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -25,7 +25,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 from yt.funcs import *
 
 from yt.utilities.linear_interpolators import \
@@ -44,13 +44,13 @@
     mylog.warning("See yt/extensions/coordinate_transforms.py for plotting information")
     if center is None: center = pf.h.find_max("Density")[1]
     fields = ensure_list(fields)
-    r,theta,phi = na.mgrid[0:rmax:nr*1j,
-                           0:na.pi:ntheta*1j,
-                           0:2*na.pi:nphi*1j]
+    r,theta,phi = np.mgrid[0:rmax:nr*1j,
+                           0:np.pi:ntheta*1j,
+                           0:2*np.pi:nphi*1j]
     new_grid = dict(r=r, theta=theta, phi=phi)
-    new_grid['x'] = r*na.sin(theta)*na.cos(phi) + center[0]
-    new_grid['y'] = r*na.sin(theta)*na.sin(phi) + center[1]
-    new_grid['z'] = r*na.cos(theta)             + center[2]
+    new_grid['x'] = r*np.sin(theta)*np.cos(phi) + center[0]
+    new_grid['y'] = r*np.sin(theta)*np.sin(phi) + center[1]
+    new_grid['z'] = r*np.cos(theta)             + center[2]
     sphere = pf.h.sphere(center, rmax)
     return arbitrary_regrid(new_grid, sphere, fields, smoothed)
 
@@ -62,10 +62,10 @@
     This has not been well-tested other than for regular spherical regridding.
     """
     fields = ensure_list(fields)
-    new_grid['handled'] = na.zeros(new_grid['x'].shape, dtype='bool')
+    new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
-        new_grid[field] = na.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = na.argsort(data_source.gridLevels)
+        new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
+    grid_order = np.argsort(data_source.gridLevels)
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):
@@ -73,12 +73,12 @@
         cg = grid.retrieve_ghost_zones(1, fields, smoothed=smoothed)
 
         # makes x0,x1,y0,y1,z0,z1
-        bounds = na.concatenate(zip(cg.left_edge, cg.right_edge)) 
+        bounds = np.concatenate(zip(cg.left_edge, cg.right_edge)) 
 
         
         # Now we figure out which of our points are inside this grid
         # Note that we're only looking at the grid, not the grid-with-ghost-zones
-        point_ind = na.ones(new_grid['handled'].shape, dtype='bool') # everything at first
+        point_ind = np.ones(new_grid['handled'].shape, dtype='bool') # everything at first
         for i,ax in enumerate('xyz'): # i = 0,1,2 ; ax = x, y, z
             # &= does a logical_and on the array
             point_ind &= ( ( grid.LeftEdge[i] <= new_grid[ax]      )
@@ -116,7 +116,7 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(phi[:,i,:], r[:,i,:],
-                  na.log10(sph_grid[field][:,i,:]))
+                  np.log10(sph_grid[field][:,i,:]))
     pylab.savefig("polar/latitude_%03i.png" % i)
 
 for i in range(n_phi):
@@ -124,6 +124,6 @@
     pylab.clf()
     ax=pylab.subplot(1,1,1, projection="polar", aspect=1.)
     ax.pcolormesh(theta[:,:,i], r[:,:,i],
-                  na.log10(sph_grid[field][:,:,i]))
+                  np.log10(sph_grid[field][:,:,i]))
     pylab.savefig("polar/longitude_%03i.png" % i)
 """

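For reference, the sample points the regridding helper above builds come from the standard spherical-to-Cartesian mapping; a standalone sketch of the same np.mgrid pattern (added illustration, not commit content):

    # The r/theta/phi sampling grid and its Cartesian mapping, mirroring the code above.
    import numpy as np

    center = (0.5, 0.5, 0.5)
    r, theta, phi = np.mgrid[0:0.25:16j, 0:np.pi:16j, 0:2*np.pi:16j]
    x = r * np.sin(theta) * np.cos(phi) + center[0]
    y = r * np.sin(theta) * np.sin(phi) + center[1]
    z = r * np.cos(theta)               + center[2]
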
diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 from yt.convenience import \
     simulation
@@ -37,10 +37,11 @@
     cosmological distance.
     """
 
-    def __init__(self, parameter_filename, simulation_type):
+    def __init__(self, parameter_filename, simulation_type, find_outputs=False):
         self.parameter_filename = parameter_filename
         self.simulation_type = simulation_type
-        self.simulation = simulation(parameter_filename, simulation_type)
+        self.simulation = simulation(parameter_filename, simulation_type, 
+                                     find_outputs=find_outputs)
 
         self.cosmology = Cosmology(
             HubbleConstantNow=(100.0 * self.simulation.hubble_constant),
@@ -131,12 +132,12 @@
 
             # fill redshift space with datasets
             while ((z > near_redshift) and
-                   (na.fabs(z - near_redshift) > z_Tolerance)):
+                   (np.fabs(z - near_redshift) > z_Tolerance)):
 
                 # For first data dump, choose closest to desired redshift.
                 if (len(cosmology_splice) == 0):
                     # Sort data outputs by proximity to current redsfhit.
-                    self.splice_outputs.sort(key=lambda obj:na.fabs(z - \
+                    self.splice_outputs.sort(key=lambda obj:np.fabs(z - \
                         obj['redshift']))
                     cosmology_splice.append(self.splice_outputs[0])
 
@@ -145,7 +146,7 @@
                     current_slice = cosmology_splice[-1]
                     while current_slice['next'] is not None and \
                             (z < current_slice['next']['redshift'] or \
-                                 na.abs(z - current_slice['next']['redshift']) <
+                                 np.abs(z - current_slice['next']['redshift']) <
                                  z_Tolerance):
                         current_slice = current_slice['next']
 
@@ -163,7 +164,7 @@
         # Make light ray using maximum number of datasets (minimum spacing).
         else:
             # Sort data outputs by proximity to current redsfhit.
-            self.splice_outputs.sort(key=lambda obj:na.fabs(far_redshift -
+            self.splice_outputs.sort(key=lambda obj:np.fabs(far_redshift -
                                                                     obj['redshift']))
             # For first data dump, choose closest to desired redshift.
             cosmology_splice.append(self.splice_outputs[0])
@@ -245,9 +246,9 @@
         outputs = []
 
         while z > near_redshift:
-            rounded = na.round(z, decimals=decimals)
+            rounded = np.round(z, decimals=decimals)
             if rounded - z < 0:
-                rounded += na.power(10.0, (-1.0*decimals))
+                rounded += np.power(10.0, (-1.0*decimals))
             z = rounded
 
             deltaz_max = self._deltaz_forward(z, self.simulation.box_size)
@@ -288,7 +289,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2-target_distance)/distance2) > d_Tolerance):
+            while ((np.fabs(distance2-target_distance)/distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -298,9 +299,9 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
-            output['deltazMax'] = na.fabs(z2 - z)
+            output['deltazMax'] = np.fabs(z2 - z)
 
     def _calculate_deltaz_min(self, deltaz_min=0.0):
         r"""Calculate delta z that corresponds to a single top grid pixel
@@ -328,7 +329,7 @@
             distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
               self.simulation.hubble_constant
 
-            while ((na.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
+            while ((np.fabs(distance2 - target_distance) / distance2) > d_Tolerance):
                 m = (distance2 - distance1) / (z2 - z1)
                 z1 = z2
                 distance1 = distance2
@@ -338,10 +339,10 @@
                 iteration += 1
                 if (iteration > max_Iterations):
                     mylog.error("calculate_deltaz_max: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                                (z, na.fabs(z2 - z)))
+                                (z, np.fabs(z2 - z)))
                     break
             # Use this calculation or the absolute minimum specified by the user.
-            output['deltazMin'] = max(na.fabs(z2 - z), deltaz_min)
+            output['deltazMin'] = max(np.fabs(z2 - z), deltaz_min)
 
     def _deltaz_forward(self, z, target_distance):
         r"""Calculate deltaz corresponding to moving a comoving distance
@@ -363,7 +364,7 @@
         distance2 = self.cosmology.ComovingRadialDistance(z2, z) * \
           self.cosmology.HubbleConstantNow / 100.0
 
-        while ((na.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
+        while ((np.fabs(distance2 - target_distance)/distance2) > d_Tolerance):
             m = (distance2 - distance1) / (z2 - z1)
             z1 = z2
             distance1 = distance2
@@ -373,6 +374,6 @@
             iteration += 1
             if (iteration > max_Iterations):
                 mylog.error("deltaz_forward: Warning - max iterations exceeded for z = %f (delta z = %f)." %
-                            (z, na.fabs(z2 - z)))
+                            (z, np.fabs(z2 - z)))
                 break
-        return na.fabs(z2 - z)
+        return np.fabs(z2 - z)
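
The hunks above belong to the tree-wide rename of the old "na" numpy alias to the conventional "np"; the logic of _deltaz_forward itself is unchanged. For readers skimming the diff, that loop is a secant-style iteration that finds the redshift interval spanning a given comoving distance. A minimal standalone sketch of the idea, assuming a comoving_radial_distance(z_low, z_high) callable as a stand-in for the cosmology method used in yt:

    import numpy as np

    def deltaz_forward(z, target_distance, comoving_radial_distance,
                       d_tolerance=1.0e-4, max_iterations=100):
        # Take a trial step of dz = 0.1 toward lower redshift, then refine.
        z1, distance1 = z, 0.0
        z2 = z - 0.1
        distance2 = comoving_radial_distance(z2, z)
        iteration = 1
        while np.fabs(distance2 - target_distance) / distance2 > d_tolerance:
            # Secant update: linearize distance(z) through the last two
            # samples and solve for the redshift hitting target_distance.
            m = (distance2 - distance1) / (z2 - z1)
            z1, distance1 = z2, distance2
            z2 = (target_distance - distance2) / m + z2
            distance2 = comoving_radial_distance(z2, z)
            iteration += 1
            if iteration > max_iterations:
                break
        return np.fabs(z2 - z)

The factors of the Hubble constant that appear in the real code are omitted here for brevity.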

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -24,25 +24,25 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 
 def common_volume(n_cube_1, n_cube_2, periodic=None):
     "Return the n-volume in common between the two n-cubes."
 
     # Check for proper args.
-    if ((len(na.shape(n_cube_1)) != 2) or
-        (na.shape(n_cube_1)[1] != 2) or
-        (na.shape(n_cube_1) != na.shape(n_cube_2))):
+    if ((len(np.shape(n_cube_1)) != 2) or
+        (np.shape(n_cube_1)[1] != 2) or
+        (np.shape(n_cube_1) != np.shape(n_cube_2))):
         print "Arguments must be 2 (n, 2) numpy array."
         return 0
 
     if ((periodic is not None) and
-        (na.shape(n_cube_1) != na.shape(periodic))):
+        (np.shape(n_cube_1) != np.shape(periodic))):
         print "periodic argument must be (n, 2) numpy array."
         return 0
 
     nCommon = 1.0
-    for q in range(na.shape(n_cube_1)[0]):
+    for q in range(np.shape(n_cube_1)[0]):
         if (periodic is None):
             nCommon *= common_segment(n_cube_1[q], n_cube_2[q])
         else:
@@ -97,10 +97,10 @@
             return min(flen1, flen2)
 
         # Adjust for periodicity
-        seg1[0] = na.mod(seg1[0], scale) + periodic[0]
+        seg1[0] = np.mod(seg1[0], scale) + periodic[0]
         seg1[1] = seg1[0] + len1
         if (seg1[1] > periodic[1]): seg1[1] -= scale
-        seg2[0] = na.mod(seg2[0], scale) + periodic[0]
+        seg2[0] = np.mod(seg2[0], scale) + periodic[0]
         seg2[1] = seg2[0] + len2
         if (seg2[1] > periodic[1]): seg2[1] -= scale
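
common_volume above works one axis at a time: the n-volume shared by two n-cubes is the product of the 1-D overlaps of their edges, with an optional periodic wrap. A minimal sketch of the non-periodic building block (a simplified restatement, not the exact yt implementation):

    def segment_overlap(seg1, seg2):
        # Each segment is (left, right); the shared length is the distance
        # between the rightmost left edge and the leftmost right edge,
        # floored at zero when the segments do not touch.
        left = max(seg1[0], seg2[0])
        right = min(seg1[1], seg2[1])
        return max(0.0, right - left)

    def box_overlap(cube1, cube2):
        # n-volume in common: product of the per-axis overlaps.
        volume = 1.0
        for s1, s2 in zip(cube1, cube2):
            volume *= segment_overlap(s1, s2)
        return volume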
 

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 
 from yt.funcs import *
 from yt.analysis_modules.halo_profiler.api import \
@@ -77,7 +77,7 @@
 
     # Write out cube of masks from each slice.
     if cube_file is not None:
-        _write_halo_mask(cube_file, na.array(light_cone_mask))
+        _write_halo_mask(cube_file, np.array(light_cone_mask))
 
     # Write out a text list of all halos in the image.
     if map_file is not None:
@@ -86,7 +86,7 @@
     # Write out final mask.
     if mask_file is not None:
         # Final mask is simply the product of the mask from each slice.
-        final_mask = na.ones(shape=(pixels, pixels))
+        final_mask = np.ones(shape=(pixels, pixels))
         for mask in light_cone_mask:
             final_mask *= mask
         _write_halo_mask(mask_file, final_mask)
@@ -103,7 +103,7 @@
     output = h5py.File(filename, 'a')
     if 'HaloMask' in output.keys():
         del output['HaloMask']
-    output.create_dataset('HaloMask', data=na.array(halo_mask))
+    output.create_dataset('HaloMask', data=np.array(halo_mask))
     output.close()
 
 @parallel_root_only
@@ -155,21 +155,21 @@
     # Make boolean mask and cut out halos.
     dx = slice['box_width_fraction'] / pixels
     x = [(q + 0.5) * dx for q in range(pixels)]
-    haloMask = na.ones(shape=(pixels, pixels), dtype=bool)
+    haloMask = np.ones(shape=(pixels, pixels), dtype=bool)
 
     # Cut out any pixel that has any part at all in the circle.
     for q in range(len(all_halo_radius)):
-        dif_xIndex = na.array(int(all_halo_x[q]/dx) -
-                              na.array(range(pixels))) != 0
-        dif_yIndex = na.array(int(all_halo_y[q]/dx) -
-                              na.array(range(pixels))) != 0
+        dif_xIndex = np.array(int(all_halo_x[q]/dx) -
+                              np.array(range(pixels))) != 0
+        dif_yIndex = np.array(int(all_halo_y[q]/dx) -
+                              np.array(range(pixels))) != 0
 
-        xDistance = (na.abs(x - all_halo_x[q]) -
+        xDistance = (np.abs(x - all_halo_x[q]) -
                      (0.5 * dx)) * dif_xIndex
-        yDistance = (na.abs(x - all_halo_y[q]) -
+        yDistance = (np.abs(x - all_halo_y[q]) -
                      (0.5 * dx)) * dif_yIndex
 
-        distance = na.array([na.sqrt(w**2 + xDistance**2)
+        distance = np.array([np.sqrt(w**2 + xDistance**2)
                              for w in yDistance])
         haloMask *= (distance >= all_halo_radius[q])
 
@@ -231,11 +231,11 @@
                                Mpc_units)
             halo_mass.append(halo['TotalMassMsun_%d' % virial_overdensity])
 
-    halo_x = na.array(halo_x)
-    halo_y = na.array(halo_y)
-    halo_depth = na.array(halo_depth)
-    halo_radius = na.array(halo_radius)
-    halo_mass = na.array(halo_mass)
+    halo_x = np.array(halo_x)
+    halo_y = np.array(halo_y)
+    halo_depth = np.array(halo_depth)
+    halo_radius = np.array(halo_radius)
+    halo_mass = np.array(halo_mass)
 
     # Adjust halo centers along line of sight.
     depth_center = slice['projection_center'][slice['projection_axis']]
@@ -247,15 +247,15 @@
     add_left = (halo_depth + halo_radius) > 1 # should be box width
     add_right = (halo_depth - halo_radius) < 0
 
-    halo_depth = na.concatenate([halo_depth,
+    halo_depth = np.concatenate([halo_depth,
                                  (halo_depth[add_left]-1),
                                  (halo_depth[add_right]+1)])
-    halo_x = na.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
-    halo_y = na.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
-    halo_radius = na.concatenate([halo_radius,
+    halo_x = np.concatenate([halo_x, halo_x[add_left], halo_x[add_right]])
+    halo_y = np.concatenate([halo_y, halo_y[add_left], halo_y[add_right]])
+    halo_radius = np.concatenate([halo_radius,
                                   halo_radius[add_left],
                                   halo_radius[add_right]])
-    halo_mass = na.concatenate([halo_mass,
+    halo_mass = np.concatenate([halo_mass,
                                 halo_mass[add_left],
                                 halo_mass[add_right]])
 
@@ -284,19 +284,19 @@
         del mask
     del halo_depth
 
-    all_halo_x = na.array([])
-    all_halo_y = na.array([])
-    all_halo_radius = na.array([])
-    all_halo_mass = na.array([])
+    all_halo_x = np.array([])
+    all_halo_y = np.array([])
+    all_halo_radius = np.array([])
+    all_halo_mass = np.array([])
 
     # Tile halos if the box width fraction is greater than one.
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(slice['box_width_fraction']))):
-        for y in range(int(na.ceil(slice['box_width_fraction']))):
-            all_halo_x = na.concatenate([all_halo_x, halo_x+x])
-            all_halo_y = na.concatenate([all_halo_y, halo_y+y])
-            all_halo_radius = na.concatenate([all_halo_radius, halo_radius])
-            all_halo_mass = na.concatenate([all_halo_mass, halo_mass])
+    for x in range(int(np.ceil(slice['box_width_fraction']))):
+        for y in range(int(np.ceil(slice['box_width_fraction']))):
+            all_halo_x = np.concatenate([all_halo_x, halo_x+x])
+            all_halo_y = np.concatenate([all_halo_y, halo_y+y])
+            all_halo_radius = np.concatenate([all_halo_radius, halo_radius])
+            all_halo_mass = np.concatenate([all_halo_mass, halo_mass])
 
     del halo_x, halo_y, halo_radius, halo_mass
 
@@ -310,8 +310,8 @@
 
     # Wrap off-edge centers back around to
     # other side (periodic boundary conditions).
-    all_halo_x[all_halo_x < 0] += na.ceil(slice['box_width_fraction'])
-    all_halo_y[all_halo_y < 0] += na.ceil(slice['box_width_fraction'])
+    all_halo_x[all_halo_x < 0] += np.ceil(slice['box_width_fraction'])
+    all_halo_y[all_halo_y < 0] += np.ceil(slice['box_width_fraction'])
 
     # After shifting, some centers have fractional coverage
     # on both sides of the box.
@@ -319,9 +319,9 @@
 
     # Centers hanging off the right edge.
     add_x_right = all_halo_x + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_x_halo_x = all_halo_x[add_x_right]
-    add_x_halo_x -= na.ceil(slice['box_width_fraction'])
+    add_x_halo_x -= np.ceil(slice['box_width_fraction'])
     add_x_halo_y = all_halo_y[add_x_right]
     add_x_halo_radius = all_halo_radius[add_x_right]
     add_x_halo_mass = all_halo_mass[add_x_right]
@@ -330,7 +330,7 @@
     # Centers hanging off the left edge.
     add_x_left = all_halo_x - all_halo_radius < 0
     add2_x_halo_x = all_halo_x[add_x_left]
-    add2_x_halo_x += na.ceil(slice['box_width_fraction'])
+    add2_x_halo_x += np.ceil(slice['box_width_fraction'])
     add2_x_halo_y = all_halo_y[add_x_left]
     add2_x_halo_radius = all_halo_radius[add_x_left]
     add2_x_halo_mass = all_halo_mass[add_x_left]
@@ -338,10 +338,10 @@
 
     # Centers hanging off the top edge.
     add_y_right = all_halo_y + all_halo_radius > \
-      na.ceil(slice['box_width_fraction'])
+      np.ceil(slice['box_width_fraction'])
     add_y_halo_x = all_halo_x[add_y_right]
     add_y_halo_y = all_halo_y[add_y_right]
-    add_y_halo_y -= na.ceil(slice['box_width_fraction'])
+    add_y_halo_y -= np.ceil(slice['box_width_fraction'])
     add_y_halo_radius = all_halo_radius[add_y_right]
     add_y_halo_mass = all_halo_mass[add_y_right]
     del add_y_right
@@ -350,24 +350,24 @@
     add_y_left = all_halo_y - all_halo_radius < 0
     add2_y_halo_x = all_halo_x[add_y_left]
     add2_y_halo_y = all_halo_y[add_y_left]
-    add2_y_halo_y += na.ceil(slice['box_width_fraction'])
+    add2_y_halo_y += np.ceil(slice['box_width_fraction'])
     add2_y_halo_radius = all_halo_radius[add_y_left]
     add2_y_halo_mass = all_halo_mass[add_y_left]
     del add_y_left
 
     # Add the hanging centers back to the projection data.
-    all_halo_x = na.concatenate([all_halo_x,
+    all_halo_x = np.concatenate([all_halo_x,
                                  add_x_halo_x, add2_x_halo_x,
                                  add_y_halo_x, add2_y_halo_x])
-    all_halo_y = na.concatenate([all_halo_y,
+    all_halo_y = np.concatenate([all_halo_y,
                                  add_x_halo_y, add2_x_halo_y,
                                  add_y_halo_y, add2_y_halo_y])
-    all_halo_radius = na.concatenate([all_halo_radius,
+    all_halo_radius = np.concatenate([all_halo_radius,
                                       add_x_halo_radius,
                                       add2_x_halo_radius,
                                       add_y_halo_radius,
                                       add2_y_halo_radius])
-    all_halo_mass = na.concatenate([all_halo_mass,
+    all_halo_mass = np.concatenate([all_halo_mass,
                                     add_x_halo_mass,
                                     add2_x_halo_mass,
                                     add_y_halo_mass,
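
The routine above builds a boolean image mask that removes every pixel overlapping a halo's circular footprint, after tiling and periodically wrapping the halo catalog. A compact sketch of just the masking step, with illustrative array names and the tiling bookkeeping left out:

    import numpy as np

    def circular_halo_mask(pixels, halo_x, halo_y, halo_radius):
        # Pixel-centre coordinates in box-fraction units.
        dx = 1.0 / pixels
        x = (np.arange(pixels) + 0.5) * dx
        xx, yy = np.meshgrid(x, x, indexing="ij")
        mask = np.ones((pixels, pixels), dtype=bool)
        for hx, hy, r in zip(halo_x, halo_y, halo_radius):
            # Shrink the separation by half a pixel so that any pixel
            # touching the circle is cut, mirroring the 0.5 * dx term above.
            sep_x = np.maximum(np.abs(xx - hx) - 0.5 * dx, 0.0)
            sep_y = np.maximum(np.abs(yy - hy) - 0.5 * dx, 0.0)
            mask &= np.sqrt(sep_x ** 2 + sep_y ** 2) >= r
        return mask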

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -25,7 +25,7 @@
 
 import copy
 import h5py
-import numpy as na
+import numpy as np
 import os
 
 from yt.funcs import *
@@ -57,7 +57,7 @@
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
-                 set_parameters=None,
+                 find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
         """
         Initialize a LightCone object.
@@ -102,6 +102,10 @@
             Whether or not to include redshift outputs when gathering
             datasets for time series.
             Default: True.
+        find_outputs : bool
+            Whether or not to search for parameter files in the current 
+            directory.
+            Default: False.
         set_parameters : dict
             Dictionary of parameters to attach to pf.parameters.
             Default: None.
@@ -150,7 +154,8 @@
             only_on_root(os.mkdir, self.output_dir)
 
         # Calculate light cone solution.
-        CosmologySplice.__init__(self, parameter_filename, simulation_type)
+        CosmologySplice.__init__(self, parameter_filename, simulation_type,
+                                 find_outputs=find_outputs)
         self.light_cone_solution = \
           self.create_cosmology_splice(self.near_redshift, self.far_redshift,
                                        minimal=self.use_minimum_datasets,
@@ -193,7 +198,7 @@
 
         # Calculate projection sizes, and get
         # random projection axes and centers.
-        na.random.seed(self.original_random_seed)
+        np.random.seed(self.original_random_seed)
 
         # For box coherence, keep track of effective depth travelled.
         box_fraction_used = 0.0
@@ -245,9 +250,9 @@
                self.light_cone_solution[q]['box_depth_fraction'] > 1.0):
                 # Random axis and center.
                 self.light_cone_solution[q]['projection_axis'] = \
-                  na.random.randint(0, 3)
+                  np.random.randint(0, 3)
                 self.light_cone_solution[q]['projection_center'] = \
-                  [na.random.random() for i in range(3)]
+                  [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice,
@@ -337,7 +342,7 @@
                                                    njobs=njobs,
                                                    dynamic=dynamic)
             # Collapse cube into final mask.
-            self.halo_mask = na.ones(shape=(self.pixels, self.pixels),
+            self.halo_mask = np.ones(shape=(self.pixels, self.pixels),
                                      dtype=bool)
             for mask in halo_mask_cube:
                 self.halo_mask *= mask
@@ -423,7 +428,7 @@
                 boxSizeProper = self.simulation.box_size / \
                   (self.simulation.hubble_constant * (1.0 + output['redshift']))
                 pixelarea = (boxSizeProper/self.pixels)**2 #in proper cm^2
-                factor = pixelarea/(4.0*na.pi*dL**2)
+                factor = pixelarea/(4.0*np.pi*dL**2)
                 mylog.info("Distance to slice = %e" % dL)
                 frb[field] *= factor #in erg/s/cm^2/Hz on observer's image plane.
 
@@ -456,7 +461,7 @@
                 else:
                     my_image = all_storage[my_slice]['field'] / \
                       all_storage[my_slice]['weight_field']
-                only_on_root(write_image, na.log10(my_image),
+                only_on_root(write_image, np.log10(my_image),
                              "%s_%s.png" % (name, field), cmap_name=cmap_name)
 
             self.projection_stack.append(all_storage[my_slice]['field'])
@@ -486,7 +491,7 @@
 
         # Write image.
         if save_final_image:
-            only_on_root(write_image, na.log10(light_cone_projection),
+            only_on_root(write_image, np.log10(light_cone_projection),
                          "%s_%s.png" % (filename, field), cmap_name=cmap_name)
 
         # Write stack to hdf5 file.
@@ -556,7 +561,7 @@
         box_fraction_used = 0.0
 
         # Seed random number generator with new seed.
-        na.random.seed(int(new_seed))
+        np.random.seed(int(new_seed))
 
         for q, output in enumerate(self.light_cone_solution):
             # It is necessary to make the same number of calls to the random
@@ -573,9 +578,9 @@
                 # Get random projection axis and center.
                 # If recycling, axis will get thrown away since it is used in
                 # creating a unique projection object.
-                newAxis = na.random.randint(0, 3)
+                newAxis = np.random.randint(0, 3)
 
-                newCenter = [na.random.random() for i in range(3)]
+                newCenter = [np.random.random() for i in range(3)]
                 box_fraction_used = 0.0
             else:
                 # Same axis and center as previous slice, but with depth center shifted.
@@ -595,8 +600,8 @@
             box_fraction_used += self.light_cone_solution[q]['box_depth_fraction']
 
             # Make list of rectangle corners to calculate common volume.
-            newCube = na.zeros(shape=(len(newCenter), 2))
-            oldCube = na.zeros(shape=(len(newCenter), 2))
+            newCube = np.zeros(shape=(len(newCenter), 2))
+            oldCube = np.zeros(shape=(len(newCenter), 2))
             for w in range(len(newCenter)):
                 if (w == self.master_solution[q]['projection_axis']):
                     oldCube[w] = [self.master_solution[q]['projection_center'][w] -
@@ -625,7 +630,7 @@
                                   0.5 * self.master_solution[q]['box_width_fraction']]
 
             my_volume += common_volume(oldCube, newCube,
-                                           periodic=na.array([[0, 1],
+                                           periodic=np.array([[0, 1],
                                                               [0, 1],
                                                               [0, 1]]))
             total_volume += output['box_depth_fraction'] * \
@@ -686,7 +691,7 @@
         "Save the light cone projection stack as a 3d array in and hdf5 file."
 
         # Make list of redshifts to include as a dataset attribute.
-        redshiftList = na.array([my_slice['redshift'] \
+        redshiftList = np.array([my_slice['redshift'] \
                                  for my_slice in self.light_cone_solution])
 
         field_node = "%s_%s" % (field, weight_field)
@@ -722,16 +727,16 @@
 
         if write_data:
             mylog.info("Saving %s to %s." % (field_node, filename))
-            self.projection_stack = na.array(self.projection_stack)
+            self.projection_stack = np.array(self.projection_stack)
             field_dataset = output.create_dataset(field_node,
                                                   data=self.projection_stack)
             field_dataset.attrs['redshifts'] = redshiftList
             field_dataset.attrs['observer_redshift'] = \
-              na.float(self.observer_redshift)
+              np.float(self.observer_redshift)
             field_dataset.attrs['field_of_view_in_arcminutes'] = \
-              na.float(self.field_of_view_in_arcminutes)
+              np.float(self.field_of_view_in_arcminutes)
             field_dataset.attrs['image_resolution_in_arcseconds'] = \
-              na.float(self.image_resolution_in_arcseconds)
+              np.float(self.image_resolution_in_arcseconds)
 
         if (len(self.projection_weight_field_stack) > 0):
             if node_exists:
@@ -749,16 +754,16 @@
             if write_data:
                 mylog.info("Saving %s to %s." % (weight_field_node, filename))
                 self.projection_weight_field_stack = \
-                  na.array(self.projection_weight_field_stack)
+                  np.array(self.projection_weight_field_stack)
                 weight_field_dataset = \
                   output.create_dataset(weight_field_node,
                                         data=self.projection_weight_field_stack)
                 weight_field_dataset.attrs['redshifts'] = redshiftList
                 weight_field_dataset.attrs['observer_redshift'] = \
-                  na.float(self.observer_redshift)
+                  np.float(self.observer_redshift)
                 weight_field_dataset.attrs['field_of_view_in_arcminutes'] = \
-                  na.float(self.field_of_view_in_arcminutes)
+                  np.float(self.field_of_view_in_arcminutes)
                 weight_field_dataset.attrs['image_resolution_in_arcseconds'] = \
-                  na.float(self.image_resolution_in_arcseconds)
+                  np.float(self.image_resolution_in_arcseconds)
 
         output.close()
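
Beyond the alias rename, the one physics-bearing line touched in this file converts a projected emission field into a flux on the observer's image plane: each pixel's proper area is divided by 4*pi*dL**2. Written out as a tiny helper (variable names illustrative, following the expressions in the hunk above):

    import numpy as np

    def flux_factor(box_size_comoving, hubble_constant, redshift, pixels, dL):
        # Proper box size at the slice redshift: comoving size / (h * (1 + z)).
        box_size_proper = box_size_comoving / (hubble_constant * (1.0 + redshift))
        pixel_area = (box_size_proper / pixels) ** 2   # proper area per pixel
        # Dividing by the sphere at the luminosity distance dL gives the
        # factor applied to each pixel of the fixed resolution buffer.
        return pixel_area / (4.0 * np.pi * dL ** 2)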

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -23,7 +23,7 @@
   along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """
 
-import numpy as na
+import numpy as np
 import copy
 
 from yt.funcs import *
@@ -98,15 +98,15 @@
     original_weight_field = copy.deepcopy(proj['weight_field'])
 
     # Copy original into offset positions to make tiles.
-    for x in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
-        for y in range(int(na.ceil(lightConeSlice['box_width_fraction']))):
+    for x in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
+        for y in range(int(np.ceil(lightConeSlice['box_width_fraction']))):
             if ((x + y) > 0):
-                proj['px'] = na.concatenate([proj['px'], original_px+x])
-                proj['py'] = na.concatenate([proj['py'], original_py+y])
-                proj['pdx'] = na.concatenate([proj['pdx'], original_pdx])
-                proj['pdy'] = na.concatenate([proj['pdy'], original_pdy])
-                proj[field] = na.concatenate([proj[field], original_field])
-                proj['weight_field'] = na.concatenate([proj['weight_field'],
+                proj['px'] = np.concatenate([proj['px'], original_px+x])
+                proj['py'] = np.concatenate([proj['py'], original_py+y])
+                proj['pdx'] = np.concatenate([proj['pdx'], original_pdx])
+                proj['pdy'] = np.concatenate([proj['pdy'], original_pdy])
+                proj[field] = np.concatenate([proj[field], original_field])
+                proj['weight_field'] = np.concatenate([proj['weight_field'],
                                                        original_weight_field])
 
     # Delete originals.
@@ -129,17 +129,17 @@
     proj['py'] -= offset[1]
 
     # Wrap off-edge cells back around to other side (periodic boundary conditions).
-    proj['px'][proj['px'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
-    proj['py'][proj['py'] < 0] += na.ceil(lightConeSlice['box_width_fraction'])
+    proj['px'][proj['px'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
+    proj['py'][proj['py'] < 0] += np.ceil(lightConeSlice['box_width_fraction'])
 
     # After shifting, some cells have fractional coverage on both sides of the box.
     # Find those cells and make copies to be placed on the other side.
 
     # Cells hanging off the right edge.
     add_x_right = proj['px'] + 0.5 * proj['pdx'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_x_px = proj['px'][add_x_right]
-    add_x_px -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_x_px -= np.ceil(lightConeSlice['box_width_fraction'])
     add_x_py = proj['py'][add_x_right]
     add_x_pdx = proj['pdx'][add_x_right]
     add_x_pdy = proj['pdy'][add_x_right]
@@ -150,7 +150,7 @@
     # Cells hanging off the left edge.
     add_x_left = proj['px'] - 0.5 * proj['pdx'] < 0
     add2_x_px = proj['px'][add_x_left]
-    add2_x_px += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_x_px += np.ceil(lightConeSlice['box_width_fraction'])
     add2_x_py = proj['py'][add_x_left]
     add2_x_pdx = proj['pdx'][add_x_left]
     add2_x_pdy = proj['pdy'][add_x_left]
@@ -160,10 +160,10 @@
 
     # Cells hanging off the top edge.
     add_y_right = proj['py'] + 0.5 * proj['pdy'] > \
-      na.ceil(lightConeSlice['box_width_fraction'])
+      np.ceil(lightConeSlice['box_width_fraction'])
     add_y_px = proj['px'][add_y_right]
     add_y_py = proj['py'][add_y_right]
-    add_y_py -= na.ceil(lightConeSlice['box_width_fraction'])
+    add_y_py -= np.ceil(lightConeSlice['box_width_fraction'])
     add_y_pdx = proj['pdx'][add_y_right]
     add_y_pdy = proj['pdy'][add_y_right]
     add_y_field = proj[field][add_y_right]
@@ -174,7 +174,7 @@
     add_y_left = proj['py'] - 0.5 * proj['pdy'] < 0
     add2_y_px = proj['px'][add_y_left]
     add2_y_py = proj['py'][add_y_left]
-    add2_y_py += na.ceil(lightConeSlice['box_width_fraction'])
+    add2_y_py += np.ceil(lightConeSlice['box_width_fraction'])
     add2_y_pdx = proj['pdx'][add_y_left]
     add2_y_pdy = proj['pdy'][add_y_left]
     add2_y_field = proj[field][add_y_left]
@@ -182,17 +182,17 @@
     del add_y_left
 
     # Add the hanging cells back to the projection data.
-    proj['px'] = na.concatenate([proj['px'], add_x_px, add_y_px,
+    proj['px'] = np.concatenate([proj['px'], add_x_px, add_y_px,
                                  add2_x_px, add2_y_px])
-    proj['py'] = na.concatenate([proj['py'], add_x_py, add_y_py,
+    proj['py'] = np.concatenate([proj['py'], add_x_py, add_y_py,
                                  add2_x_py, add2_y_py])
-    proj['pdx'] = na.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
+    proj['pdx'] = np.concatenate([proj['pdx'], add_x_pdx, add_y_pdx,
                                   add2_x_pdx, add2_y_pdx])
-    proj['pdy'] = na.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
+    proj['pdy'] = np.concatenate([proj['pdy'], add_x_pdy, add_y_pdy,
                                   add2_x_pdy, add2_y_pdy])
-    proj[field] = na.concatenate([proj[field], add_x_field, add_y_field,
+    proj[field] = np.concatenate([proj[field], add_x_field, add_y_field,
                                   add2_x_field, add2_y_field])
-    proj['weight_field'] = na.concatenate([proj['weight_field'],
+    proj['weight_field'] = np.concatenate([proj['weight_field'],
                                            add_x_weight_field, add_y_weight_field,
                                            add2_x_weight_field, add2_y_weight_field])
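
These hunks tile the projection when the requested image is wider than one box and then wrap off-edge cells back in with periodic boundary conditions, duplicating any cell whose extent straddles an edge. A minimal 1-D sketch of that wrap-and-duplicate step (the real code carries px, py, pdx, pdy, the field, and the weight field through every concatenate):

    import numpy as np

    def wrap_periodic_1d(px, pdx, width):
        # Shift negative centres back into [0, width).
        px = np.where(px < 0, px + width, px)
        # Cells whose half-width crosses an edge get a copy on the far side.
        right = px + 0.5 * pdx > width
        left = px - 0.5 * pdx < 0
        px = np.concatenate([px, px[right] - width, px[left] + width])
        pdx = np.concatenate([pdx, pdx[right], pdx[left]])
        return px, pdx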
 

diff -r e968b07d70ba2ca468606bc65966b40c68520aa2 -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -24,7 +24,7 @@
 """
 
 import copy
-import numpy as na
+import numpy as np
 import random as rand
 import sys
 
@@ -128,7 +128,7 @@
         rand.seed(seed)
         state = rand.getstate()
 
-    fail_digits = str(int(na.log10(failures))+1)
+    fail_digits = str(int(np.log10(failures))+1)
 
     while (len(unique_seeds) < solutions):
         # Create new random seed.
@@ -221,7 +221,7 @@
         mylog.error("Light cone solutions do not have equal volumes, will use the smaller one.")
 
     for q in range(len(solution1)):
-        cube1 = na.zeros(shape=(len(solution1[q]['projection_center']), 2))
+        cube1 = np.zeros(shape=(len(solution1[q]['projection_center']), 2))
         volume1 = 1.0
         for w in range(len(cube1)):
             if (w == solution1[q]['projection_axis']):
@@ -232,7 +232,7 @@
             cube1[w] = [solution1[q]['projection_center'][w] - 0.5 * width,
                         solution1[q]['projection_center'][w] + 0.5 * width]
 
-        cube2 = na.zeros(shape=(len(solution2[q]['projection_center']), 2))
+        cube2 = np.zeros(shape=(len(solution2[q]['projection_center']), 2))
         volume2 = 1.0
         for w in range(len(cube2)):
             if (w == solution2[q]['projection_axis']):
@@ -245,7 +245,7 @@
 
         total_volume += min(volume1, volume2)
         my_volume += common_volume(cube1, cube2,
-                                   periodic=na.array([[0, 1],
+                                   periodic=np.array([[0, 1],
                                                       [0, 1],
                                                       [0, 1]]))
 

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/a8f0f480751a/
changeset:   a8f0f480751a
branch:      stable
user:        MatthewTurk
date:        2013-02-14 23:24:43
summary:     Updating to gold006 for gold standard.
affected #:  1 file

diff -r 3dfcccb39e3ecccc004f2c67f9e4534768c49a42 -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold005',
+    gold_standard_filename = 'gold006',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )
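
For anyone running the answer tests locally, this default can be overridden per user rather than by editing yt/config.py; in the yt 2.x series the user configuration normally lives in ~/.yt/config (location stated from memory, so treat it as an assumption rather than part of this changeset):

    [yt]
    gold_standard_filename = gold006
    local_standard_filename = local001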


https://bitbucket.org/yt_analysis/yt/commits/9b336aa05056/
changeset:   9b336aa05056
branch:      stable
user:        MatthewTurk
date:        2013-02-15 21:59:29
summary:     Merging from many recent changes to development branch.
affected #:  32 files

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -36,14 +36,20 @@
 speed_of_light_kms = speed_of_light_cgs * km_per_cm
 
 class AbsorptionSpectrum(object):
+    r"""Create an absorption spectrum object.
+
+    Parameters
+    ----------
+
+    lambda_min : float
+       lower wavelength bound in angstroms.
+    lambda_max : float
+       upper wavelength bound in angstroms.
+    n_lambda : float
+       number of wavelength bins.
+    """
+
     def __init__(self, lambda_min, lambda_max, n_lambda):
-        """
-        Create an absorption spectrum object.
-        :param lambda_min (float): lower wavelength bound in angstroms.
-        :param lambda_max (float): upper wavelength bound in angstroms.
-        :param n_lambda (float): number of wavelength bins.
-        """
-
         self.n_lambda = n_lambda
         self.tau_field = None
         self.flux_field = None
@@ -56,16 +62,24 @@
     def add_line(self, label, field_name, wavelength,
                  f_value, gamma, atomic_mass,
                  label_threshold=None):
+        r"""Add an absorption line to the list of lines included in the spectrum.
+
+        Parameters
+        ----------
+        
+        label : string
+           label for the line.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        f_value  : float
+           line f-value.
+        gamma : float
+           line gamma value.
+        atomic_mass : float
+           mass of atom in amu.
         """
-        Add an absorption line to the list of lines included in the spectrum.
-        :param label (string): label for the line.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param f_value (float): line f-value.
-        :param gamma (float): line gamme value.
-        :param atomic_mass (float): mass of atom in amu.
-        """
-
         self.line_list.append({'label': label, 'field_name': field_name,
                                'wavelength': wavelength, 'f_value': f_value,
                                'gamma': gamma, 'atomic_mass': atomic_mass,
@@ -75,11 +89,20 @@
                       normalization, index):
         """
         Add a continuum feature that follows a power-law.
-        :param label (string): label for the feature.
-        :param field_name (string): field name from ray data for column densities.
-        :param wavelength (float): line rest wavelength in angstroms.
-        :param normalization (float): the column density normalization.
-        :param index (float): the power-law index for the wavelength dependence.
+
+        Parameters
+        ----------
+
+        label : string
+           label for the feature.
+        field_name : string
+           field name from ray data for column densities.
+        wavelength : float
+           line rest wavelength in angstroms.
+        normalization : float
+           the column density normalization.
+        index : float
+           the power-law index for the wavelength dependence.
         """
 
         self.continuum_list.append({'label': label, 'field_name': field_name,
@@ -92,14 +115,17 @@
                       use_peculiar_velocity=True):
         """
         Make spectrum from ray data using the line list.
-        :param input_file (string): path to input ray data.
-        :param output_file (string): path for output file.
-               File formats are chosen based on the filename extension.
-                    - .h5: hdf5.
-                    - .fits: fits.
-                    - anything else: ascii.
-        :param use_peculiar_velocity (bool): if True, include line of sight
-        velocity for shifting lines.
+
+        Parameters
+        ----------
+
+        input_file : string
+           path to input ray data.
+        output_file : string
+           path for output file.  File formats are chosen based on the filename extension.
+           ``.h5`` for hdf5, ``.fits`` for fits, and everything else is ASCII.
+        use_peculiar_velocity : bool
+           if True, include line of sight velocity for shifting lines.
         """
 
         input_fields = ['dl', 'redshift', 'Temperature']
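
Reformatted this way, the docstrings also double as a usage recipe. A hedged sketch of the workflow they describe; the import path, the H_NumberDensity field name, and the file names are illustrative, while the Lyman-alpha numbers are standard literature values rather than anything taken from this diff:

    from yt.analysis_modules.absorption_spectrum.api import AbsorptionSpectrum

    # Wavelength window in angstroms and number of bins.
    sp = AbsorptionSpectrum(900.0, 1800.0, 10000)

    # H I Lyman-alpha: rest wavelength (A), f-value, gamma (1/s), mass (amu).
    sp.add_line("HI Lya", "H_NumberDensity", 1215.67, 0.4164, 6.265e8, 1.00794)

    # Build the spectrum from a previously saved light ray file.
    sp.make_spectrum("ray.h5", output_file="spectrum.h5",
                     use_peculiar_velocity=True)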

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -106,8 +106,9 @@
     RadialColumnDensity
 
 from .spectral_integrator.api import \
-    SpectralFrequencyIntegrator, \
-    create_table_from_textfiles
+     add_xray_emissivity_field, \
+     add_xray_luminosity_field, \
+     add_xray_photon_emissivity_field
 
 from .star_analysis.api import \
     StarFormationRate, \

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -49,6 +49,64 @@
      _light_cone_projection
 
 class LightCone(CosmologySplice):
+    """
+    Initialize a LightCone object.
+
+    Parameters
+    ----------
+    near_redshift : float
+        The near (lowest) redshift for the light cone.
+    far_redshift : float
+        The far (highest) redshift for the light cone.
+    observer_redshift : float
+        The redshift of the observer.
+        Default: 0.0.
+    field_of_view_in_arcminutes : float
+        The field of view of the image in units of arcminutes.
+        Default: 600.0.
+    image_resolution_in_arcseconds : float
+        The size of each image pixel in units of arcseconds.
+        Default: 60.0.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the initial
+        and final redshift.  If false, the light cone solution will contain
+        as many entries as possible within the redshift interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive datasets in
+        the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter specifies
+        the fraction of the total box size to be traversed before rerandomizing
+        the projection axis and center.  This was invented to allow light cones
+        with thin slices to sample coherent large scale structure, but in
+        practice does not work so well.  Try setting this parameter to 1 and
+        see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+    set_parameters : dict
+        Dictionary of parameters to attach to pf.parameters.
+        Default: None.
+    output_dir : string
+        The directory in which images and data files will be written.
+        Default: 'LC'.
+    output_prefix : string
+        The prefix of all images and data files.
+        Default: 'LightCone'.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  observer_redshift=0.0,
@@ -59,64 +117,6 @@
                  time_data=True, redshift_data=True,
                  find_outputs=False, set_parameters=None,
                  output_dir='LC', output_prefix='LightCone'):
-        """
-        Initialize a LightCone object.
-
-        Parameters
-        ----------
-        near_redshift : float
-            The near (lowest) redshift for the light cone.
-        far_redshift : float
-            The far (highest) redshift for the light cone.
-        observer_redshift : float
-            The redshift of the observer.
-            Default: 0.0.
-        field_of_view_in_arcminutes : float
-            The field of view of the image in units of arcminutes.
-            Default: 600.0.
-        image_resolution_in_arcseconds : float
-            The size of each image pixel in units of arcseconds.
-            Default: 60.0.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the initial
-            and final redshift.  If false, the light cone solution will contain
-            as many entries as possible within the redshift interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive datasets in
-            the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter specifies
-            the fraction of the total box size to be traversed before rerandomizing
-            the projection axis and center.  This was invented to allow light cones
-            with thin slices to sample coherent large scale structure, but in
-            practice does not work so well.  Try setting this parameter to 1 and
-            see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-        set_parameters : dict
-            Dictionary of parameters to attach to pf.parameters.
-            Default: None.
-        output_dir : string
-            The directory in which images and data files will be written.
-            Default: 'LC'.
-        output_prefix : string
-            The prefix of all images and data files.
-            Default: 'LightCone'.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
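
With the parameter documentation now at class level, constructing a LightCone reads directly off the list above. A minimal construction sketch; the parameter file name is a placeholder, the import path is assumed to be the module's api, and the solution/projection calls that normally follow are omitted here:

    from yt.analysis_modules.cosmological_observation.light_cone.api import LightCone

    lc = LightCone("my_simulation.par", "Enzo",        # parameter file, simulation type
                   near_redshift=0.0, far_redshift=0.1,
                   field_of_view_in_arcminutes=600.0,
                   image_resolution_in_arcseconds=60.0,
                   use_minimum_datasets=True,
                   find_outputs=False,
                   output_dir="LC", output_prefix="LightCone")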

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -40,66 +40,66 @@
     parallel_root_only
 
 class LightRay(CosmologySplice):
+    """
+    Create a LightRay object.  A light ray is much like a light cone,
+    in that it stacks together multiple datasets in order to extend a
+    redshift interval.  Unlike a light cone, which does randomly
+    oriented projections for each dataset, a light ray consists of
+    randomly oriented single rays.  The purpose of these is to create
+    synthetic QSO lines of sight.
+
+    Once the LightRay object is set up, use LightRay.make_light_ray to
+    begin making rays.  Different randomizations can be created with a
+    single object by providing different random seeds to make_light_ray.
+
+    Parameters
+    ----------
+    parameter_filename : string
+        The simulation parameter file.
+    simulation_type : string
+        The simulation type.
+    near_redshift : float
+        The near (lowest) redshift for the light ray.
+    far_redshift : float
+        The far (highest) redshift for the light ray.
+    use_minimum_datasets : bool
+        If True, the minimum number of datasets is used to connect the
+        initial and final redshift.  If false, the light ray solution
+        will contain as many entries as possible within the redshift
+        interval.
+        Default: True.
+    deltaz_min : float
+        Specifies the minimum :math:`\Delta z` between consecutive
+        datasets in the returned list.
+        Default: 0.0.
+    minimum_coherent_box_fraction : float
+        Used with use_minimum_datasets set to False, this parameter
+        specifies the fraction of the total box size to be traversed
+        before rerandomizing the projection axis and center.  This
+        was invented to allow light rays with thin slices to sample
+        coherent large scale structure, but in practice does not work
+        so well.  Try setting this parameter to 1 and see what happens.
+        Default: 0.0.
+    time_data : bool
+        Whether or not to include time outputs when gathering
+        datasets for time series.
+        Default: True.
+    redshift_data : bool
+        Whether or not to include redshift outputs when gathering
+        datasets for time series.
+        Default: True.
+    find_outputs : bool
+        Whether or not to search for parameter files in the current 
+        directory.
+        Default: False.
+
+    """
     def __init__(self, parameter_filename, simulation_type,
                  near_redshift, far_redshift,
                  use_minimum_datasets=True, deltaz_min=0.0,
                  minimum_coherent_box_fraction=0.0,
                  time_data=True, redshift_data=True,
                  find_outputs=False):
-        """
-        Create a LightRay object.  A light ray is much like a light cone,
-        in that it stacks together multiple datasets in order to extend a
-        redshift interval.  Unlike a light cone, which does randomly
-        oriented projections for each dataset, a light ray consists of
-        randomly oriented single rays.  The purpose of these is to create
-        synthetic QSO lines of sight.
-
-        Once the LightRay object is set up, use LightRay.make_light_ray to
-        begin making rays.  Different randomizations can be created with a
-        single object by providing different random seeds to make_light_ray.
-
-        Parameters
-        ----------
-        parameter_filename : string
-            The simulation parameter file.
-        simulation_type : string
-            The simulation type.
-        near_redshift : float
-            The near (lowest) redshift for the light ray.
-        far_redshift : float
-            The far (highest) redshift for the light ray.
-        use_minimum_datasets : bool
-            If True, the minimum number of datasets is used to connect the
-            initial and final redshift.  If false, the light ray solution
-            will contain as many entries as possible within the redshift
-            interval.
-            Default: True.
-        deltaz_min : float
-            Specifies the minimum :math:`\Delta z` between consecutive
-            datasets in the returned list.
-            Default: 0.0.
-        minimum_coherent_box_fraction : float
-            Used with use_minimum_datasets set to False, this parameter
-            specifies the fraction of the total box size to be traversed
-            before rerandomizing the projection axis and center.  This
-            was invented to allow light rays with thin slices to sample
-            coherent large scale structure, but in practice does not work
-            so well.  Try setting this parameter to 1 and see what happens.
-            Default: 0.0.
-        time_data : bool
-            Whether or not to include time outputs when gathering
-            datasets for time series.
-            Default: True.
-        redshift_data : bool
-            Whether or not to include redshift outputs when gathering
-            datasets for time series.
-            Default: True.
-        find_outputs : bool
-            Whether or not to search for parameter files in the current 
-            directory.
-            Default: False.
-
-        """
 
         self.near_redshift = near_redshift
         self.far_redshift = far_redshift
@@ -270,47 +270,43 @@
         Examples
         --------
 
-        from yt.mods import *
-        from yt.analysis_modules.halo_profiler.api import *
-        from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
-
-        halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
-
-        halo_profiler_actions = []
-        # Add a virial filter.
-        halo_profiler_actions.append({'function': add_halo_filter,
-                                      'args': VirialFilter,
-                                      'kwargs': {'overdensity_field': 'ActualOverdensity',
-                                                 'virial_overdensity': 200,
-                                                 'virial_filters': \
-                                                     [['TotalMassMsun','>=','1e14']],
-                                                 'virial_quantities': \
-                                                     ['TotalMassMsun','RadiusMpc']}})
-        # Make the profiles.
-        halo_profiler_actions.append({'function': make_profiles,
-                                      'args': None,
-                                      'kwargs': {'filename': 'VirializedHalos.out'}})
-
-        halo_list = 'filtered'
-
-        halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
-                                        halo_profiler_actions=halo_profiler_actions,
-                                        halo_list=halo_list)
-
-        my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
-                          use_minimum_datasets=True,
-                          time_data=False)
-
-        my_ray.make_light_ray(seed=12345,
-                              solution_filename='solution.txt',
-                              data_filename='my_ray.h5',
-                              fields=['Temperature', 'Density'],
-                              get_nearest_halo=True,
-                              nearest_halo_fields=['TotalMassMsun_100',
-                                                   'RadiusMpc_100'],
-                              halo_profiler_parameters=halo_profiler_parameters,
-                              get_los_velocity=True)
-
+        >>> from yt.mods import *
+        >>> from yt.analysis_modules.halo_profiler.api import *
+        >>> from yt.analysis_modules.cosmological_analysis.light_ray.api import LightRay
+        >>> halo_profiler_kwargs = {'halo_list_file': 'HopAnalysis.out'}
+        >>> halo_profiler_actions = []
+        >>> # Add a virial filter.
+        >>> halo_profiler_actions.append({'function': add_halo_filter,
+        ...                           'args': VirialFilter,
+        ...                           'kwargs': {'overdensity_field': 'ActualOverdensity',
+        ...                                      'virial_overdensity': 200,
+        ...                                      'virial_filters': [['TotalMassMsun','>=','1e14']],
+        ...                                      'virial_quantities': ['TotalMassMsun','RadiusMpc']}})
+        ...
+        >>> # Make the profiles.
+        >>> halo_profiler_actions.append({'function': make_profiles,
+        ...                           'args': None,
+        ...                           'kwargs': {'filename': 'VirializedHalos.out'}})
+        ...
+        >>> halo_list = 'filtered'
+        >>> halo_profiler_parameters = dict(halo_profiler_kwargs=halo_profiler_kwargs,
+        ...                             halo_profiler_actions=halo_profiler_actions,
+        ...                             halo_list=halo_list)
+        ...
+        >>> my_ray = LightRay('simulation.par', 'Enzo', 0., 0.1,
+        ...                use_minimum_datasets=True,
+        ...                time_data=False)
+        ...
+        >>> my_ray.make_light_ray(seed=12345,
+        ...                   solution_filename='solution.txt',
+        ...                   data_filename='my_ray.h5',
+        ...                   fields=['Temperature', 'Density'],
+        ...                   get_nearest_halo=True,
+        ...                   nearest_halo_fields=['TotalMassMsun_100',
+        ...                                        'RadiusMpc_100'],
+        ...                   halo_profiler_parameters=halo_profiler_parameters,
+        ...                   get_los_velocity=True)
+        
         """
 
         if halo_profiler_parameters is None:

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -2088,88 +2088,88 @@
 
 
 class parallelHF(GenericHaloFinder, parallelHOPHaloList):
+    r"""Parallel HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+    particles to higher.
+    3. Geometrically proximate chains are identified and
+    4. merged into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    This is very similar to HOP, but it does not produce precisely the
+    same halos due to unavoidable numerical differences.
+
+    Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
+    Cosmological Data Sets." arXiv (2010) 1001.3411
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = False.
+    resize : bool
+        Turns load-balancing on or off. Default = True.
+    kdtree : string
+        Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
+        faster, but uses more memory. The Cython one (kdtree = 'C') is
+        slower but is more memory efficient.
+        Default = 'F'
+    rearrange : bool
+        Turns on faster nearest neighbor searches at the cost of increased
+        memory usage.
+        This option only applies when using the Fortran tree.
+        Default = True.
+    fancy_padding : bool
+        True calculates padding independently for each face of each
+        subvolume. Default = True.
+    safety : float
+        Due to variances in inter-particle spacing in the volume, the
+        padding may need to be increased above the raw calculation.
+        This number is multiplied to the calculated padding, and values
+        >1 increase the padding. Default = 1.5.
+    premerge : bool
+        True merges chains in two steps (rather than one with False), which
+        can speed up halo finding by 25% or more. However, True can result
+        in small (<<1%) variations in the final halo masses when compared
+        to False. Default = True.
+    sample : float
+        The fraction of the full dataset on which load-balancing is
+        performed. Default = 0.03.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding on a subvolume, this still corresponds with the
+        mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+    num_particles : integer
+        The total number of particles in the volume, in the same fashion
+        as `total_mass` is calculated. Specifying this turns off
+        fancy_padding.
+        Default = None, which means the number of particles is
+        automatically calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = parallelHF(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True, \
         resize=True, rearrange=True,\
         fancy_padding=True, safety=1.5, premerge=True, sample=0.03, \
         total_mass=None, num_particles=None, tree='F'):
-        r"""Parallel HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        This is very similar to HOP, but it does not produce precisely the
-        same halos due to unavoidable numerical differences.
-
-        Skory et al. "Parallel HOP: A Scalable Halo Finder for Massive
-        Cosmological Data Sets." arXiv (2010) 1001.3411
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        resize : bool
-            Turns load-balancing on or off. Default = True.
-        kdtree : string
-            Chooses which kD Tree to use. The Fortran one (kdtree = 'F') is
-            faster, but uses more memory. The Cython one (kdtree = 'C') is
-            slower but is more memory efficient.
-            Default = 'F'
-        rearrange : bool
-            Turns on faster nearest neighbor searches at the cost of increased
-            memory usage.
-            This option only applies when using the Fortran tree.
-            Default = True.
-        fancy_padding : bool
-            True calculates padding independently for each face of each
-            subvolume. Default = True.
-        safety : float
-            Due to variances in inter-particle spacing in the volume, the
-            padding may need to be increased above the raw calculation.
-            This number is multiplied to the calculated padding, and values
-            >1 increase the padding. Default = 1.5.
-        premerge : bool
-            True merges chains in two steps (rather than one with False), which
-            can speed up halo finding by 25% or more. However, True can result
-            in small (<<1%) variations in the final halo masses when compared
-            to False. Default = True.
-        sample : float
-            The fraction of the full dataset on which load-balancing is
-            performed. Default = 0.03.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-        num_particles : integer
-            The total number of particles in the volume, in the same fashion
-            as `total_mass` is calculated. Specifying this turns off
-            fancy_padding.
-            Default = None, which means the number of particles is
-            automatically calculated.
-
-        Examples
-        -------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = parallelHF(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2416,58 +2416,58 @@
 
 
 class HOPHaloFinder(GenericHaloFinder, HOPHaloList):
+    r"""HOP halo finder.
+
+    Halos are built by:
+    1. Calculating a density for each particle based on a smoothing kernel.
+    2. Recursively linking particles to other particles from lower density
+       particles to higher.
+    3. Identifying geometrically proximate chains.
+    4. Merging the chains into final halos following merging rules.
+
+    Lower thresholds generally produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
+    Simulations." ApJ (1998) vol. 498 pp. 137-142
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which HOP will be run, which can be used to run HOP
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    threshold : float
+        The density threshold used when building halos. Default = 160.0.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = True.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+    total_mass : float
+        If HOP is run on the same dataset multiple times, the total mass
+        of particles in Msun units in the full volume can be supplied here
+        to save time.
+        This must correspond to the particles being operated on, meaning
+        if stars are included in the halo finding, they must be included
+        in this mass as well, and vice versa.
+        If halo finding on a subvolume, this still corresponds with the
+        mass in the entire volume.
+        Default = None, which means the total mass is automatically
+        calculated.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = HaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, threshold=160, dm_only=True,
             padding=0.02, total_mass=None):
-        r"""HOP halo finder.
-
-        Halos are built by:
-        1. Calculating a density for each particle based on a smoothing kernel.
-        2. Recursively linking particles to other particles from lower density
-        particles to higher.
-        3. Geometrically proximate chains are identified and
-        4. merged into final halos following merging rules.
-
-        Lower thresholds generally produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Eisenstein and Hut. "HOP: A New Group-Finding Algorithm for N-Body
-        Simulations." ApJ (1998) vol. 498 pp. 137-142
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        threshold : float
-            The density threshold used when building halos. Default = 160.0.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-        total_mass : float
-            If HOP is run on the same dataset mulitple times, the total mass
-            of particles in Msun units in the full volume can be supplied here
-            to save time.
-            This must correspond to the particles being operated on, meaning
-            if stars are included in the halo finding, they must be included
-            in this mass as well, and visa-versa.
-            If halo finding on a subvolume, this still corresponds with the
-            mass in the entire volume.
-            Default = None, which means the total mass is automatically
-            calculated.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = HaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2521,47 +2521,47 @@
 
 
 class FOFHaloFinder(GenericHaloFinder, FOFHaloList):
+    r"""Friends-of-friends halo finder.
+
+    Halos are found by linking together all pairs of particles closer than
+    some distance from each other. Particles may have multiple links,
+    and halos are found by recursively linking together all such pairs.
+
+    Larger linking lengths produce more halos, and the largest halos
+    become larger. Also, halos become more filamentary and over-connected.
+
+    Davis et al. "The evolution of large-scale structure in a universe
+    dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
+
+    Parameters
+    ----------
+    pf : `StaticOutput`
+        The parameter file on which halo finding will be conducted.
+    subvolume : `yt.data_objects.api.AMRData`, optional
+        A region over which FOF will be run, which can be used to run FOF
+        on a subvolume of the full volume. Default = None, which defaults
+        to the full volume automatically.
+    link : float
+        If positive, the interparticle distance (compared to the overall
+        average) used to build the halos. If negative, this is taken to be
+        the *actual* linking length, and no other calculations will be
+        applied.  Default = 0.2.
+    dm_only : bool
+        If True, only dark matter particles are used when building halos.
+        Default = True.
+    padding : float
+        When run in parallel, the finder needs to surround each subvolume
+        with duplicated particles for halo finding to work. This number
+        must be no smaller than the radius of the largest halo in the box
+        in code units. Default = 0.02.
+
+    Examples
+    --------
+    >>> pf = load("RedshiftOutput0000")
+    >>> halos = FOFHaloFinder(pf)
+    """
     def __init__(self, pf, subvolume=None, link=0.2, dm_only=True,
         padding=0.02):
-        r"""Friends-of-friends halo finder.
-
-        Halos are found by linking together all pairs of particles closer than
-        some distance from each other. Particles may have multiple links,
-        and halos are found by recursively linking together all such pairs.
-
-        Larger linking lengths produce more halos, and the largest halos
-        become larger. Also, halos become more filamentary and over-connected.
-
-        Davis et al. "The evolution of large-scale structure in a universe
-        dominated by cold dark matter." ApJ (1985) vol. 292 pp. 371-394
-
-        Parameters
-        ----------
-        pf : `StaticOutput`
-            The parameter file on which halo finding will be conducted.
-        subvolume : `yt.data_objects.api.AMRData`, optional
-            A region over which HOP will be run, which can be used to run HOP
-            on a subvolume of the full volume. Default = None, which defaults
-            to the full volume automatically.
-        link : float
-            If positive, the interparticle distance (compared to the overall
-            average) used to build the halos. If negative, this is taken to be
-            the *actual* linking length, and no other calculations will be
-            applied.  Default = 0.2.
-        dm_only : bool
-            If True, only dark matter particles are used when building halos.
-            Default = False.
-        padding : float
-            When run in parallel, the finder needs to surround each subvolume
-            with duplicated particles for halo finidng to work. This number
-            must be no smaller than the radius of the largest halo in the box
-            in code units. Default = 0.02.
-
-        Examples
-        --------
-        >>> pf = load("RedshiftOutput0000")
-        >>> halos = FOFHaloFinder(pf)
-        """
         if subvolume is not None:
             ds_LE = np.array(subvolume.left_edge)
             ds_RE = np.array(subvolume.right_edge)
@@ -2610,84 +2610,84 @@
 
 
 class LoadHaloes(GenericHaloFinder, LoadedHaloList):
+    r"""Load the full halo data into memory.
+
+    This function takes the output of `GenericHaloFinder.dump` and
+    re-establishes the list of halos in memory. This enables the full set
+    of halo analysis features without running the halo finder again. To
+    be precise, the particle data for each halo is only read in when
+    necessary, so examining a single halo will not require as much memory
+    as is required for halo finding.
+
+    Parameters
+    ----------
+    basename : String
+        The base name of the files that will be read in. This should match
+        what was used when `GenericHaloFinder.dump` was called. Default =
+        "HopAnalysis".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadHaloes(pf, "HopAnalysis")
+    """
     def __init__(self, pf, basename):
-        r"""Load the full halo data into memory.
-
-        This function takes the output of `GenericHaloFinder.dump` and
-        re-establishes the list of halos in memory. This enables the full set
-        of halo analysis features without running the halo finder again. To
-        be precise, the particle data for each halo is only read in when
-        necessary, so examining a single halo will not require as much memory
-        as is required for halo finding.
-
-        Parameters
-        ----------
-        basename : String
-            The base name of the files that will be read in. This should match
-            what was used when `GenericHaloFinder.dump` was called. Default =
-            "HopAnalysis".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadHaloes(pf, "HopAnalysis")
-        """
         self.basename = basename
         LoadedHaloList.__init__(self, pf, self.basename)
 
 class LoadTextHaloes(GenericHaloFinder, TextHaloList):
+    r"""Load a text file of halos.
+    
+    Like LoadHaloes, but when all that is available is a plain
+    text file. This assumes the text file has the 3-positions of halos
+    along with a radius. The halo objects created are spheres.
+
+    Parameters
+    ----------
+    filename : String
+        The name of the text file to read in.
+    
+    columns : dict
+        A dict listing the column name : column number pairs for data
+        in the text file. It is zero-based (like Python).
+        An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
+        Any column name outside of ['x', 'y', 'z', 'r'] will be attached
+        to each halo object in the supplementary dict 'supp'. See
+        example.
+    
+    comment : String
+        If the first character of a line is equal to this, the line is
+        skipped. Default = "#".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadTextHaloes(pf, "list.txt",
+    ...     {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
+    ...     comment = ";")
+    >>> halos[0].supp['m']
+    3.28392048e14
+    """
     def __init__(self, pf, filename, columns, comment = "#"):
-        r"""Load a text file of halos.
-        
-        Like LoadHaloes, but when all that is available is a plain
-        text file. This assumes the text file has the 3-positions of halos
-        along with a radius. The halo objects created are spheres.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the text file to read in.
-        
-        columns : dict
-            A dict listing the column name : column number pairs for data
-            in the text file. It is zero-based (like Python).
-            An example is {'x':0, 'y':1, 'z':2, 'r':3, 'm':4}.
-            Any column name outside of ['x', 'y', 'z', 'r'] will be attached
-            to each halo object in the supplementary dict 'supp'. See
-            example.
-        
-        comment : String
-            If the first character of a line is equal to this, the line is
-            skipped. Default = "#".
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadTextHaloes(pf, "list.txt",
-            {'x':0, 'y':1, 'z':2, 'r':3, 'm':4},
-            comment = ";")
-        >>> halos[0].supp['m']
-            3.28392048e14
-        """
         TextHaloList.__init__(self, pf, filename, columns, comment)
 
 LoadTextHalos = LoadTextHaloes
 
 class LoadRockstarHalos(GenericHaloFinder, RockstarHaloList):
+    r"""Load Rockstar halos off disk from Rockstar-output format.
+
+    Parameters
+    ----------
+    filename : String
+        The name of the Rockstar file to read in. Default =
+        "rockstar_halos/out_0.list".
+
+    Examples
+    --------
+    >>> pf = load("data0005")
+    >>> halos = LoadRockstarHalos(pf, "other_name.out")
+    """
     def __init__(self, pf, filename = None):
-        r"""Load Rockstar halos off disk from Rockstar-output format.
-
-        Parameters
-        ----------
-        fname : String
-            The name of the Rockstar file to read in. Default = 
-            "rockstar_halos/out_0.list'.
-
-        Examples
-        --------
-        >>> pf = load("data0005")
-        >>> halos = LoadRockstarHalos(pf, "other_name.out")
-        """
         if filename is None:
             filename = 'rockstar_halos/out_0.list'
         RockstarHaloList.__init__(self, pf, filename)

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -114,80 +114,80 @@
         return pool, workgroup
 
 class RockstarHaloFinder(ParallelAnalysisInterface):
+    r"""Spawns the Rockstar Halo finder, distributes dark matter
+    particles and finds halos.
+
+    The halo finder requires dark matter particles of a fixed size.
+    Rockstar has three main processes: reader, writer, and the 
+    server which coordinates reader/writer processes.
+
+    Parameters
+    ----------
+    ts   : TimeSeriesData, StaticOutput
+        This is the data source containing the DM particles. Because 
+        halo IDs may change from one snapshot to the next, the only
+        way to keep a consistent halo ID across time is to feed 
+        Rockstar a set of snapshots, i.e., via TimeSeriesData.
+    num_readers: int
+        The number of readers can be increased from the default
+        of 1 in the event that a single snapshot is split among
+        many files. This can help in cases where performance is
+        IO-limited. Default is 1. If run inline, it is
+        equal to the number of MPI threads.
+    num_writers: int
+        The number of writers determines the number of processing threads
+        as well as the number of threads writing output data.
+        The default is set to comm.size-num_readers-1. If run inline,
+        the default is equal to the number of MPI threads.
+    outbase: str
+        This is where the out*list files that Rockstar makes should be
+        placed. Default is 'rockstar_halos'.
+    dm_type: int
+        In order to exclude stars and other particle types, define
+        the dm_type. Default is 1, as Enzo has the DM particle type=1.
+    force_res: float
+        This parameter specifies the force resolution that Rockstar uses
+        in units of Mpc/h.
+        If no value is provided, this parameter is automatically set to
+        the width of the smallest grid element in the simulation from the
+        last data snapshot (i.e. the one where time has evolved the
+        longest) in the time series:
+        ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
+    total_particles : int
+        If supplied, this is a pre-calculated total number of dark matter
+        particles present in the simulation. For example, this is useful
+        when analyzing a series of snapshots where the number of dark
+        matter particles should not change and this will save some disk
+        access time. If left unspecified, it will
+        be calculated automatically. Default: ``None``.
+    dm_only : boolean
+        If set to ``True``, it will be assumed that there are only dark
+        matter particles present in the simulation. This can save analysis
+        time if this is indeed the case. Default: ``False``.
+        
+    Returns
+    -------
+    None
+
+    Examples
+    --------
+    To use the script below you must run it using MPI::
+
+        mpirun -np 3 python test_rockstar.py --parallel
+
+    test_rockstar.py::
+
+        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
+        from yt.mods import *
+        import sys
+
+        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
+        pm = 7.81769027e+11
+        rh = RockstarHaloFinder(ts)
+        rh.run()
+    """
     def __init__(self, ts, num_readers = 1, num_writers = None,
             outbase="rockstar_halos", dm_type=1, 
             force_res=None, total_particles=None, dm_only=False):
-        r"""Spawns the Rockstar Halo finder, distributes dark matter
-        particles and finds halos.
-
-        The halo finder requires dark matter particles of a fixed size.
-        Rockstar has three main processes: reader, writer, and the 
-        server which coordinates reader/writer processes.
-
-        Parameters
-        ----------
-        ts   : TimeSeriesData, StaticOutput
-            This is the data source containing the DM particles. Because 
-            halo IDs may change from one snapshot to the next, the only
-            way to keep a consistent halo ID across time is to feed 
-            Rockstar a set of snapshots, ie, via TimeSeriesData.
-        num_readers: int
-            The number of reader can be increased from the default
-            of 1 in the event that a single snapshot is split among
-            many files. This can help in cases where performance is
-            IO-limited. Default is 1. If run inline, it is
-            equal to the number of MPI threads.
-        num_writers: int
-            The number of writers determines the number of processing threads
-            as well as the number of threads writing output data.
-            The default is set to comm.size-num_readers-1. If run inline,
-            the default is equal to the number of MPI threads.
-        outbase: str
-            This is where the out*list files that Rockstar makes should be
-            placed. Default is 'rockstar_halos'.
-        dm_type: 1
-            In order to exclude stars and other particle types, define
-            the dm_type. Default is 1, as Enzo has the DM particle type=1.
-        force_res: float
-            This parameter specifies the force resolution that Rockstar uses
-            in units of Mpc/h.
-            If no value is provided, this parameter is automatically set to
-            the width of the smallest grid element in the simulation from the
-            last data snapshot (i.e. the one where time has evolved the
-            longest) in the time series:
-            ``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
-        total_particles : int
-            If supplied, this is a pre-calculated total number of dark matter
-            particles present in the simulation. For example, this is useful
-            when analyzing a series of snapshots where the number of dark
-            matter particles should not change and this will save some disk
-            access time. If left unspecified, it will
-            be calculated automatically. Default: ``None``.
-        dm_only : boolean
-            If set to ``True``, it will be assumed that there are only dark
-            matter particles present in the simulation. This can save analysis
-            time if this is indeed the case. Default: ``False``.
-            
-        Returns
-        -------
-        None
-
-        Examples
-        --------
-        To use the script below you must run it using MPI:
-        mpirun -np 3 python test_rockstar.py --parallel
-
-        test_rockstar.py:
-
-        from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder
-        from yt.mods import *
-        import sys
-
-        ts = TimeSeriesData.from_filenames('/u/cmoody3/data/a*')
-        pm = 7.81769027e+11
-        rh = RockstarHaloFinder(ts)
-        rh.run()
-        """
         mylog.warning("The citation for the Rockstar halo finder can be found at")
         mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
         ParallelAnalysisInterface.__init__(self)

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/halo_mass_function/halo_mass_function.py
--- a/yt/analysis_modules/halo_mass_function/halo_mass_function.py
+++ b/yt/analysis_modules/halo_mass_function/halo_mass_function.py
@@ -33,52 +33,52 @@
     parallel_blocking_call
 
 class HaloMassFcn(ParallelAnalysisInterface):
+    """
+    Initialize a HaloMassFcn object to analyze the distribution of haloes
+    as a function of mass.
+    :param halo_file (str): The filename of the output of the Halo Profiler.
+    Default=None.
+    :param omega_matter0 (float): The fraction of the universe made up of
+    matter (dark and baryonic). Default=None.
+    :param omega_lambda0 (float): The fraction of the universe made up of
+    dark energy. Default=None.
+    :param omega_baryon0 (float): The fraction of the universe made up of
+    ordinary baryonic matter. This should match the value
+    used to create the initial conditions, using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.05.
+    :param hubble0 (float): The expansion rate of the universe in units of
+    100 km/s/Mpc. Default=None.
+    :param sigma8input (float): The amplitude of the linear power
+    spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
+    in a top-hat sphere of radius 8 Mpc/h. This should match the value
+    used to create the initial conditions, using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=0.86.
+    :param primordial_index (float): This is the index of the mass power
+    spectrum before modification by the transfer function. A value of 1
+    corresponds to the scale-free primordial spectrum. This should match
+    the value used to make the initial conditions using 'inits'. This is 
+    *not* stored in the enzo dataset so it must be checked by hand.
+    Default=1.0.
+    :param this_redshift (float): The current redshift. Default=None.
+    :param log_mass_min (float): The log10 of the mass of the minimum of the
+    halo mass range. Default=None.
+    :param log_mass_max (float): The log10 of the mass of the maximum of the
+    halo mass range. Default=None.
+    :param num_sigma_bins (int): The number of bins (points) to use for
+    the calculations and generated fit. Default=360.
+    :param fitting_function (int): Which fitting function to use.
+    1 = Press-Schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit,
+    5 = Tinker.
+    Default=4.
+    :param mass_column (int): The column of halo_file that contains the
+    masses of the haloes. Default=5.
+    """
     def __init__(self, pf, halo_file=None, omega_matter0=None, omega_lambda0=None,
     omega_baryon0=0.05, hubble0=None, sigma8input=0.86, primordial_index=1.0,
     this_redshift=None, log_mass_min=None, log_mass_max=None, num_sigma_bins=360,
     fitting_function=4, mass_column=5):
-        """
-        Initalize a HaloMassFcn object to analyze the distribution of haloes
-        as a function of mass.
-        :param halo_file (str): The filename of the output of the Halo Profiler.
-        Default=None.
-        :param omega_matter0 (float): The fraction of the universe made up of
-        matter (dark and baryonic). Default=None.
-        :param omega_lambda0 (float): The fraction of the universe made up of
-        dark energy. Default=None.
-        :param omega_baryon0 (float): The fraction of the universe made up of
-        ordinary baryonic matter. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.05.
-        :param hubble0 (float): The expansion rate of the universe in units of
-        100 km/s/Mpc. Default=None.
-        :param sigma8input (float): The amplitude of the linear power
-        spectrum at z=0 as specified by the rms amplitude of mass-fluctuations
-        in a top-hat sphere of radius 8 Mpc/h. This should match the value
-        used to create the initial conditions, using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=0.86.
-        :param primoridal_index (float): This is the index of the mass power
-        spectrum before modification by the transfer function. A value of 1
-        corresponds to the scale-free primordial spectrum. This should match
-        the value used to make the initial conditions using 'inits'. This is 
-        *not* stored in the enzo datset so it must be checked by hand.
-        Default=1.0.
-        :param this_redshift (float): The current redshift. Default=None.
-        :param log_mass_min (float): The log10 of the mass of the minimum of the
-        halo mass range. Default=None.
-        :param log_mass_max (float): The log10 of the mass of the maximum of the
-        halo mass range. Default=None.
-        :param num_sigma_bins (float): The number of bins (points) to use for
-        the calculations and generated fit. Default=360.
-        :param fitting_function (int): Which fitting function to use.
-        1 = Press-schechter, 2 = Jenkins, 3 = Sheth-Tormen, 4 = Warren fit
-        5 = Tinker
-        Default=4.
-        :param mass_column (int): The column of halo_file that contains the
-        masses of the haloes. Default=4.
-        """
         ParallelAnalysisInterface.__init__(self)
         self.pf = pf
         self.halo_file = halo_file
@@ -543,22 +543,22 @@
 """
 
 class TransferFunction(object):
+    """
+    This routine takes cosmological parameters and a redshift and sets up
+    all the internal scalar quantities needed to compute the transfer function.
+
+    INPUT:
+        omega_matter -- Density of CDM, baryons, and massive neutrinos,
+                        in units of the critical density.
+        omega_baryon -- Density of baryons, in units of critical.
+        omega_hdm    -- Density of massive neutrinos, in units of critical.
+        degen_hdm    -- (Int) Number of degenerate massive neutrino species.
+        omega_lambda -- Cosmological constant.
+        hubble       -- Hubble constant, in units of 100 km/s/Mpc.
+        redshift     -- The redshift at which to evaluate.
+
+    OUTPUT:
+        Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
+        sets many global variables for use in TFmdm_onek_mpc().
+    """
     def __init__(self, omega_matter, omega_baryon, omega_hdm,
 	    degen_hdm, omega_lambda, hubble, redshift):
-        """
-        /* This routine takes cosmological parameters and a redshift and sets up
-        all the internal scalar quantities needed to compute the transfer function. */
-        /* INPUT: omega_matter -- Density of CDM, baryons, and massive neutrinos,
-                        in units of the critical density. */
-        /* 	  omega_baryon -- Density of baryons, in units of critical. */
-        /* 	  omega_hdm    -- Density of massive neutrinos, in units of critical */
-        /* 	  degen_hdm    -- (Int) Number of degenerate massive neutrino species */
-        /*        omega_lambda -- Cosmological constant */
-        /* 	  hubble       -- Hubble constant, in units of 100 km/s/Mpc */
-        /*        redshift     -- The redshift at which to evaluate */
-        /* OUTPUT: Returns 0 if all is well, 1 if a warning was issued.  Otherwise,
-            sets many global variables for use in TFmdm_onek_mpc() */
-        """
         self.qwarn = 0;
         self.theta_cmb = 2.728/2.7 # Assuming T_cmb = 2.728 K
     

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -123,52 +123,55 @@
         self.conn.close()
 
 class MergerTree(DatabaseFunctions, ParallelAnalysisInterface):
+    r"""Build a merger tree of halos over a time-ordered set of snapshots.
+    This will run a halo finder to find the halos first if it hasn't already
+    been done. The output is a SQLite database file, which may need to
+    be stored on a different disk than the data snapshots. See the full
+    documentation for details.
+    
+    Parameters
+    ----------
+    
+    restart_files : List of strings
+        A list containing the paths to the forward time-ordered set of
+        data snapshots.
+    database : String
+        Name of SQLite database file. Default = "halos.db".
+    halo_finder_function : HaloFinder name
+        The name of the halo finder to use if halo finding is run by 
+        the merger tree. Options: HaloFinder, FOFHaloFinder, parallelHF.
+        Note that this is not a string, so no quotes. Default = HaloFinder.
+    halo_finder_threshold : Float
+        If using HaloFinder or parallelHF, the value of the density threshold
+        used when halo finding. Default = 80.0.
+    FOF_link_length : Float
+        If using FOFHaloFinder, the linking length between particles.
+        Default = 0.2.
+    dm_only : Boolean
+        When halo finding, whether to restrict to only dark matter particles.
+        Default = False.
+    refresh : Boolean
+        True forces the halo finder to run even if the halo data has been
+        detected on disk. Default = False.
+    index : Boolean
+        If True, an index is added to the SQLite database, which greatly
+        speeds up future queries of the database,
+        at the cost of doubling the disk space used by the file.
+        Default = True.
+
+    Examples
+    --------
+
+    >>> rf = ['/scratch/user/sim1/DD0000/data0000',
+    ... '/scratch/user/sim1/DD0001/data0001',
+    ... '/scratch/user/sim1/DD0002/data0002']
+    >>> MergerTree(rf, database = '/home/user/sim1-halos.db',
+    ... halo_finder_function=parallelHF)
+    """
     def __init__(self, restart_files=[], database='halos.db',
             halo_finder_function=HaloFinder, halo_finder_threshold=80.0,
             FOF_link_length=0.2, dm_only=False, refresh=False,
             index=True):
-        r"""Build a merger tree of halos over a time-ordered set of snapshots.
-        This will run a halo finder to find the halos first if it hasn't already
-        been done. The output is a SQLite database file, which may need to
-        be stored on a different disk than the data snapshots. See the full
-        documentation for details.
-        
-        Parameters
-        ---------
-        restart_files : List of strings
-            A list containing the paths to the forward time-ordered set of
-            data snapshots.
-        database : String
-            Name of SQLite database file. Default = "halos.db".
-        halo_finder_function : HaloFinder name
-            The name of the halo finder to use if halo finding is run by 
-            the merger tree. Options: HaloFinder, FOFHaloFinder, parallelHF.
-            Note that this is not a string, so no quotes. Default = HaloFinder.
-        halo_finder_threshold : Float
-            If using HaloFinder or parallelHF, the value of the density threshold
-            used when halo finding. Default = 80.0.
-        FOF_link_length : Float
-            If using FOFHaloFinder, the linking length between particles.
-            Default = 0.2.
-        dm_only : Boolean
-            When halo finding, whether to restrict to only dark matter particles.
-            Default = False.
-        refresh : Boolean
-            True forces the halo finder to run even if the halo data has been
-            detected on disk. Default = False.
-        index : Boolean
-            SQLite databases can have added to them an index which greatly
-            speeds up future queries of the database,
-            at the cost of doubling the disk space used by the file.
-            Default = True.
-
-        Examples:
-        >>> rf = ['/scratch/user/sim1/DD0000/data0000',
-        ... '/scratch/user/sim1/DD0001/data0001',
-        ... '/scratch/user/sim1/DD0002/data0002']
-        >>> MergerTree(rf, database = '/home/user/sim1-halos.db',
-        ... halo_finder_function=parallelHF)
-        """
         ParallelAnalysisInterface.__init__(self)
         self.restart_files = restart_files # list of enzo restart files
         self.with_halos = np.ones(len(restart_files), dtype='bool')
@@ -726,18 +729,18 @@
         return 0.
 
 class MergerTreeConnect(DatabaseFunctions):
+    r"""Create a convenience object for accessing data from the halo database.
+    
+    Parameters
+    ----------
+    database : String
+        The name of the halo database to access. Default = 'halos.db'.
+    
+    Examples
+    --------
+    >>> mtc = MergerTreeConnect('/home/user/sim1-halos.db')
+    """
     def __init__(self, database='halos.db'):
-        r"""Create a convenience object for accessing data from the halo database.
-        
-        Parameters
-        ----------
-        database : String
-            The name of the halo database to access. Default = 'halos.db'.
-        
-        Examples
-        -------
-        >>> mtc = MergerTreeConnect('/home/user/sim1-halos.db')
-        """
         self.database = database
         result = self._open_database()
         if not result:
@@ -783,7 +786,8 @@
         r"""Returns the GlobalHaloID for the given halo.
         
         Parameters
-        ---------
+        ----------
+
         SnapHaloID : Integer
             The index label for the halo of interest, equivalent to
             the first column of the halo finder text output file.
@@ -794,6 +798,7 @@
         
         Examples
         --------
+
         >>> this_halo = mtc.get_GlobalHaloID(0, 0.)
         """
         string = "SELECT GlobalHaloID,SnapZ FROM Halos WHERE SnapHaloID = %d;" \
@@ -933,44 +938,46 @@
         self.fractions = []
 
 class MergerTreeDotOutput(DatabaseFunctions, ParallelAnalysisInterface):
+    r"""Output the merger tree history for a given set of halo(s) in Graphviz
+    format.
+    
+    Parameters
+    ----------
+
+    halos : Integer or list of integers
+        If current_time below is not specified or is None, this is an integer
+        or list of integers with the GlobalHaloIDs of the halos to be
+        tracked. If current_time is specified, this is the SnapHaloIDs
+        for the halos to be tracked, which is identical to what is in
+        HopAnalysis.out files (for example).
+    database : String
+        The name of the database file. Default = 'halos.db'.
+    dotfile : String
+        The name of the file to write to. Default = 'MergerTree.gv'.
+        The suffix of this name gives the format of the output file,
+        so 'MergerTree.jpg' would output a jpg file. "dot -v" (from the
+        command line) will print
+        a list of image formats supported on the system. The default
+        suffix '.gv' will output the results to a text file in the Graphviz
+        markup language.
+    current_time : Integer
+        The SnapCurrentTimeIdentifier for the snapshot for the halos to
+        be tracked. This is identical to the CurrentTimeIdentifier in
+        Enzo restart files. Default = None.
+    link_min : Float
+        When establishing a parent/child relationship, this is the minimum
+        mass fraction of the parent halo contributed to
+        the child halo that will be tracked
+        while building the Graphviz file. Default = 0.2.
+    
+    Examples
+    --------
+
+    >>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
+    ... dotfile = 'halo-182842.gv')
+    """
     def __init__(self, halos=None, database='halos.db',
             dotfile='MergerTree.gv', current_time=None, link_min=0.2):
-        r"""Output the merger tree history for a given set of halo(s) in Graphviz
-        format.
-        
-        Parameters
-        ---------
-        halos : Integer or list of integers
-            If current_time below is not specified or is None, this is an integer
-            or list of integers with the GlobalHaloIDs of the halos to be
-            tracked. If current_time is specified, this is the SnapHaloIDs
-            for the halos to be tracked, which is identical to what is in
-            HopAnalysis.out files (for example).
-        database : String
-            The name of the database file. Default = 'halos.db'.
-        dotfile : String
-            The name of the file to write to. Default = 'MergerTree.gv'.
-            The suffix of this name gives the format of the output file,
-            so 'MergerTree.jpg' would output a jpg file. "dot -v" (from the
-            command line) will print
-            a list of image formats supported on the system. The default
-            suffix '.gv' will output the results to a text file in the Graphviz
-            markup language.
-        current_time : Integer
-            The SnapCurrentTimeIdentifier for the snapshot for the halos to
-            be tracked. This is identical to the CurrentTimeIdentifier in
-            Enzo restart files. Default = None.
-        link_min : Float
-            When establishing a parent/child relationship, this is the minimum
-            mass fraction of the parent halo contributed to
-            the child halo that will be tracked
-            while building the Graphviz file. Default = 0.2.
-        
-        Examples
-        --------
-        >>> MergerTreeDotOutput(halos=182842, database='/home/user/sim1-halos.db',
-        ... dotfile = 'halo-182842.gv')
-        """
         ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.link_min = link_min
@@ -1102,22 +1109,22 @@
         self.graph.write("%s" % dotfile, format=suffix)
 
 class MergerTreeTextOutput(DatabaseFunctions, ParallelAnalysisInterface):
+    r"""Dump the contents of the merger tree database to a text file.
+    This is generally not recommended.
+    
+    Parameters
+    ----------
+    database : String
+        Name of the database to access. Default = 'halos.db'.
+    outfile : String
+        Name of the file to write to. Default = 'MergerTreeDB.txt'.
+    
+    Examples
+    --------
+    >>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
+    ... outfile='halos-db.txt')
+    """
     def __init__(self, database='halos.db', outfile='MergerTreeDB.txt'):
-        r"""Dump the contents of the merger tree database to a text file.
-        This is generally not recommended.
-        
-        Parameters
-        ----------
-        database : String
-            Name of the database to access. Default = 'halos.db'.
-        outfile : String
-            Name of the file to write to. Default = 'MergerTreeDB.txt'.
-        
-        Examples
-        --------
-        >>> MergerTreeTextOutput(database='/home/user/sim1-halos.db',
-        ... outfile='halos-db.txt')
-        """
         ParallelAnalysisInterface.__init__(self)
         self.database = database
         self.outfile = outfile

diff -r a8f0f480751a2caa60525c8c38886e9dbaa0b14e -r 9b336aa05056cb4f6aab9066dd24c3c188695f2f yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -59,7 +59,130 @@
 PROFILE_RADIUS_THRESHOLD = 2
 
 class HaloProfiler(ParallelAnalysisInterface):
-    "Radial profiling, filtering, and projections for halos in cosmological simulations."
+    r"""Initialize a Halo Profiler object.
+
+    In order to run the halo profiler, the Halo Profiler object must be
+    instantiated. At the minimum, the path to a parameter file
+    must be provided as the first term.
+
+    Parameters
+    ----------
+
+    dataset : string, required
+        The path to the parameter file for the dataset to be analyzed.
+    output_dir : string, optional
+        If specified, all output will be put into this path instead of
+        in the dataset directories.  Default: None.
+    halos :  {"multiple", "single"}, optional
+        For profiling more than one halo.  In this mode halos are read in
+        from a list or identified with a halo finder.  In "single" mode,
+        the one and only halo
+        center is identified automatically as the location of the peak
+        in the density field.
+        Default: "multiple".
+    halo_list_file : string, optional
+        The name of a file containing the list of halos.  The HaloProfiler
+        will look for this file in the data directory.
+        Default: "HopAnalysis.out".
+    halo_list_format : {string, dict}
+        The format of the halo list file.  "yt_hop" for the format
+        given by yt's halo finders.  "enzo_hop" for the format written
+        by enzo_hop.  "p-groupfinder" for P-Groupfinder.  This keyword
+        can also be given in the form of a dictionary specifying the
+        column in which various properties can be found.
+        For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
+        Default: "yt_hop".
+    halo_finder_function : function
+        If halos is set to multiple and the file given by
+        halo_list_file does not exist, the halo finding function
+        specified here will be called.
+        Default: HaloFinder (yt_hop).
+    halo_finder_args : tuple
+        Args given with call to halo finder function.  Default: None.
+    halo_finder_kwargs : dict
+        kwargs given with call to halo finder function. Default: None.
+    recenter : {string, function}
+        The exact location of the sphere center can significantly affect
+        radial profiles.  The halo center loaded by the HaloProfiler will
+        typically be the dark matter center of mass calculated by a halo
+        finder.  However, this may not be the best location for centering
+        profiles of baryon quantities.  For example, one may want to center
+        on the maximum density.
+        If recenter is given as a string, one of the existing recentering
+        functions will be used:
+
+            * Min_Dark_Matter_Density : location of minimum dark matter density
+            * Max_Dark_Matter_Density : location of maximum dark matter density
+            * CoM_Dark_Matter_Density : dark matter center of mass
+            * Min_Gas_Density : location of minimum gas density
+            * Max_Gas_Density : location of maximum gas density
+            * CoM_Gas_Density : gas center of mass
+            * Min_Total_Density : location of minimum total density
+            * Max_Total_Density : location of maximum total density
+            * CoM_Total_Density : total center of mass
+            * Min_Temperature : location of minimum temperature
+            * Max_Temperature : location of maximum temperature
+
+        Alternately, a function can be supplied for custom recentering.
+        The function should take only one argument, a sphere object.  Example
+        function::
+            
+               def my_center_of_mass(data):
+                   my_x, my_y, my_z = data.quantities['CenterOfMass']()
+                   return (my_x, my_y, my_z)
+            
+        Default: None.
+    halo_radius : float
+        If no halo radii are provided in the halo list file, this
+        parameter is used to specify the radius out to which radial
+        profiles will be made.  This keyword is also
+        used when halos is set to single.  Default: 0.1.
+    radius_units : string
+        The units of halo_radius.  Default: "1" (code units).
+    n_profile_bins : int
+        The number of bins in the radial profiles.  Default: 50.
+    profile_output_dir : str
+        The subdirectory, inside the data directory, in which radial profile
+        output files will be created.  The directory will be created if it does
+        not exist.  Default: "radial_profiles".
+    projection_output_dir : str
+        The subdirectory, inside the data directory, in which projection
+        output files will be created.  The directory will be created if it does
+        not exist.  Default: "projections".
+    projection_width : float
+        The width of halo projections.  Default: 8.0.
+    projection_width_units : string
+        The units of projection_width. Default: "mpc".
+    project_at_level : {"max", int}
+        The maximum refinement level to be included in projections.
+        Default: "max" (maximum level within the dataset).
+    velocity_center : array_like
+        The method in which the halo bulk velocity is calculated (used for
+        calculation of radial and tangential velocities).  Valid options are:
+
+            * ["bulk", "halo"] (Default): the velocity provided in
+              the halo list
+            * ["bulk", "sphere"]: the bulk velocity of the sphere
+              centered on the halo center.
+            * ["max", field]: the velocity of the cell that is the
+              location of the maximum of the field specified.
+
+    filter_quantities : array_like
+        Quantities from the original halo list file to be written out in the
+        filtered list file.  Default: ['id', 'center', 'r_max'].
+    use_critical_density : bool
+        If True, the definition of overdensity for virial quantities
+        is calculated with respect to the critical density.
+        If False, overdensity is with respect to mean matter density,
+        which is lower by a factor of Omega_M.  Default: False.
+
+    Examples
+    --------
+
+    >>> from yt.analysis_modules.halo_profiler.api import *
+    >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
+
+    """
     def __init__(self, dataset, output_dir=None,
                  halos='multiple', halo_list_file='HopAnalysis.out',
                  halo_list_format='yt_hop', halo_finder_function=parallelHF,
@@ -73,125 +196,6 @@
                  projection_width=8.0, projection_width_units='mpc', project_at_level='max',
                  velocity_center=['bulk', 'halo'], filter_quantities=['id', 'center', 'r_max'],
                  use_critical_density=False):
-        r"""Initialize a Halo Profiler object.
-
-        In order to run the halo profiler, the Halo Profiler object must be
-        instantiated. At the minimum, the path to a parameter file
-        must be provided as the first term.
-
-        Parameters
-        ----------
-
-        dataset : string, required
-            The path to the parameter file for the dataset to be analyzed.
-        output_dir : string, optional
-            If specified, all output will be put into this path instead of
-            in the dataset directories.  Default: None.
-        halos :  {"multiple", "single"}, optional
-            For profiling more than one halo.  In this mode halos are read in
-            from a list or identified with a halo finder.  In "single" mode,
-            the one and only halo
-            center is identified automatically as the location of the peak
-            in the density field.
-            Default: "multiple".
-        halo_list_file : string, optional
-            The name of a file containing the list of halos.  The HaloProfiler
-            will  look for this file in the data directory.
-            Default: "HopAnalysis.out".
-        halo_list_format : {string, dict}
-            The format of the halo list file.  "yt_hop" for the format
-            given by yt's halo finders.  "enzo_hop" for the format written
-            by enzo_hop. "p-groupfinder"  for P-Groupfinder.  This keyword
-            can also be given in the form of a dictionary specifying the
-            column in which various properties can be found.
-            For example, {"id": 0, "center": [1, 2, 3], "mass": 4, "radius": 5}.
-            Default: "yt_hop".
-        halo_finder_function : function
-            If halos is set to multiple and the file given by
-            halo_list_file does not exit, the halo finding function
-            specified here will be called.
-            Default: HaloFinder (yt_hop).
-        halo_finder_args : tuple
-            Args given with call to halo finder function.  Default: None.
-        halo_finder_kwargs : dict
-            kwargs given with call to halo finder function. Default: None.
-        recenter : {string, function}
-            The exact location of the sphere center can significantly affect
-            radial profiles.  The halo center loaded by the HaloProfiler will
-            typically be the dark matter center of mass calculated by a halo
-            finder.  However, this may not be the best location for centering
-            profiles of baryon quantities.  For example, one may want to center
-            on the maximum density.
-            If recenter is given as a string, one of the existing recentering
-            functions will be used:
-                Min_Dark_Matter_Density : location of minimum dark matter density
-                Max_Dark_Matter_Density : location of maximum dark matter density
-                CoM_Dark_Matter_Density : dark matter center of mass
-                Min_Gas_Density : location of minimum gas density
-                Max_Gas_Density : location of maximum gas density
-                CoM_Gas_Density : gas center of mass
-                Min_Total_Density : location of minimum total density
-                Max_Total_Density : location of maximum total density
-                CoM_Total_Density : total center of mass
-                Min_Temperature : location of minimum temperature
-                Max_Temperature : location of maximum temperature
-            Alternately, a function can be supplied for custom recentering.
-            The function should take only one argument, a sphere object.
-                Example function:
-                    def my_center_of_mass(data):
-                       my_x, my_y, my_z = data.quantities['CenterOfMass']()
-                       return (my_x, my_y, my_z)
-            Default: None.
-        halo_radius : float
-            If no halo radii are provided in the halo list file, this
-            parameter is used to specify the radius out to which radial
-            profiles will be made.  This keyword is also
-            used when halos is set to single.  Default: 0.1.
-        radius_units : string
-            The units of halo_radius.  Default: "1" (code units).
-        n_profile_bins : int
-            The number of bins in the radial profiles.  Default: 50.
-        profile_output_dir : str
-            The subdirectory, inside the data directory, in which radial profile
-            output files will be created.
-            The directory will be created if it does not exist.
-            Default: "radial_profiles".
-        projection_output_dir : str
-            The subdirectory, inside the data directory, in which projection
-            output files will be created.
-            The directory will be created if it does not exist.
-            Default: "projections".
-        projection_width : float
-            The width of halo projections.  Default: 8.0.
-        projection_width_units : string
-            The units of projection_width. Default: "mpc".
-        project_at_level : {"max", int}
-            The maximum refinement level to be included in projections.
-            Default: "max" (maximum level within the dataset).
-        velocity_center  : array_like
-            The method in which the halo bulk velocity is calculated (used for
-            calculation of radial and tangential velocities.  Valid options are:
-     	        * ["bulk", "halo"] (Default): the velocity provided in
-     	          the halo list
-                * ["bulk", "sphere"]: the bulk velocity of the sphere
-                  centered on the halo center.
-    	        * ["max", field]: the velocity of the cell that is the
-    	          location of the maximum of the field specified.
-        filter_quantities : array_like
-            Quantities from the original halo list file to be written out in the
-            filtered list file.  Default: ['id','center'].
-        use_critical_density : bool
-            If True, the definition of overdensity for virial quantities
-            is calculated with respect to the critical density.
-            If False, overdensity is with respect to mean matter density,
-            which is lower by a factor of Omega_M.  Default: False.
-
-        Examples
-        --------
-        >>> from yt.analysis_modules.halo_profiler.api import *
-        >>> hp = HaloProfiler("RedshiftOutput0005/RD0005")
-
-        """
         ParallelAnalysisInterface.__init__(self)
 
         self.dataset = dataset
@@ -428,6 +432,7 @@
 
         Paramters
         ---------
+
         filename : str
             If set, a file will be written with all of the filtered halos
             and the quantities returned by the filter functions.
@@ -454,6 +459,7 @@
 
         Examples
         --------
+
         >>> hp.make_profiles(filename="FilteredQuantities.out",
                  prefilters=["halo['mass'] > 1e13"])
 
@@ -703,7 +709,8 @@
         calculations and saves the output to disk.
 
         Parameters
-        ---------
+        ----------
+
         axes : array_like
             A list of the axes to project along, using the usual 0,1,2
             convention. Default=[0,1,2]
@@ -729,6 +736,7 @@
 
         Examples
         --------
+
         >>> hp.make_projections(axes=[0, 1, 2], save_cube=True,
             save_images=True, halo_list="filtered")
 
@@ -872,14 +880,17 @@
         to the provided analysis function.
 
         Parameters
-        ---------
+        ----------
+
         analysis_function : function
             A function taking two arguments, the halo dictionary, and a
             sphere object.
-            Example function to calculate total mass of halo:
+            Example function to calculate total mass of halo::
+
                 def my_analysis(halo, sphere):
                     total_mass = sphere.quantities['TotalMass']()
                     print total_mass
+            
         halo_list : {'filtered', 'all'}
             Which set of halos to make profiles of, either ones passed by the
             halo filters (if enabled/added), or all halos.
@@ -898,6 +909,7 @@
 
         Examples
         --------
+
         >>> hp.analyze_halo_spheres(my_analysis, halo_list="filtered",
                                     analysis_output_dir='special_analysis')
 

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


