[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Tue Sep 30 12:59:24 PDT 2014


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7038c5346264/
Changeset:   7038c5346264
Branch:      yt-3.0
User:        ngoldbaum
Date:        2014-09-30 19:20:04+00:00
Summary:     Merging to clear an extra head.
Affected #:  261 files

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/helper_scripts/show_fields.py
--- a/doc/helper_scripts/show_fields.py
+++ b/doc/helper_scripts/show_fields.py
@@ -1,10 +1,65 @@
 import inspect
 from yt.mods import *
+from yt.testing import *
+import numpy as np
+from yt.utilities.cosmology import \
+     Cosmology
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.frontends.stream.fields import \
+    StreamFieldInfo
+from yt.frontends.api import _frontends
+from yt.fields.derived_field import NullFunc
+import yt.frontends as frontends_module
+from yt.units.yt_array import YTArray, Unit
+from yt.units import dimensions
 
+fields, units = [], []
 
-def islambda(f):
-    return inspect.isfunction(f) and \
-           f.__name__ == (lambda: True).__name__
+for fname, (code_units, aliases, dn) in StreamFieldInfo.known_other_fields:
+    fields.append(("gas", fname))
+    units.append(code_units)
+base_ds = fake_random_ds(4, fields = fields, units = units)
+base_ds.index
+base_ds.cosmological_simulation = 1
+base_ds.cosmology = Cosmology()
+
+from yt.config import ytcfg
+ytcfg["yt","__withintesting"] = "True"
+np.seterr(all = 'ignore')
+
+def _strip_ftype(field):
+    if not isinstance(field, tuple):
+        return field
+    elif field[0] == "all":
+        return field
+    return field[1]
+
+np.random.seed(int(0x4d3d3d3))
+units = [base_ds._get_field_info(*f).units for f in fields]
+fields = [_strip_ftype(f) for f in fields]
+ds = fake_random_ds(16, fields = fields, units = units)
+ds.parameters["HydroMethod"] = "streaming"
+ds.parameters["EOSType"] = 1.0
+ds.parameters["EOSSoundSpeed"] = 1.0
+ds.conversion_factors["Time"] = 1.0
+ds.conversion_factors.update( dict((f, 1.0) for f in fields) )
+ds.gamma = 5.0/3.0
+ds.current_redshift = 0.0001
+ds.cosmological_simulation = 1
+ds.hubble_constant = 0.7
+ds.omega_matter = 0.27
+ds.omega_lambda = 0.73
+ds.cosmology = Cosmology(hubble_constant=ds.hubble_constant,
+                         omega_matter=ds.omega_matter,
+                         omega_lambda=ds.omega_lambda,
+                         unit_registry=ds.unit_registry)
+for my_unit in ["m", "pc", "AU", "au"]:
+    new_unit = "%scm" % my_unit
+    ds.unit_registry.add(new_unit, base_ds.unit_registry.lut[my_unit][0],
+                         dimensions.length, "\\rm{%s}/(1+z)" % my_unit)
+
+
 
 header = r"""
 .. _field-list:
@@ -12,10 +67,20 @@
 Field List
 ==========
 
-This is a list of all fields available in ``yt``.  It has been organized by the
-type of code that each field is supported by.  "Universal" fields are available
-everywhere, "Enzo" fields in Enzo datasets, "Orion" fields in Orion datasets,
-and so on.
+This is a list of many of the fields available in yt.  We have attempted to
+include most of the fields that are accessible through the plugin system, as
+well as the fields that are known by the frontends; however, it is possible
+to generate many more permutations, particularly through vector operations.
+For more information about the fields framework, see :ref:`fields`.
+
+Some fields are recognized by specific frontends only. These are typically
+fields like density and temperature that have their own names and units in
+the different frontend datasets. Often, these fields are aliased to their
+yt-named counterpart fields (typically 'gas' fieldtypes). For example, in
+the ``FLASH`` frontend, the ``dens`` field (i.e. ``(flash, dens)``) is aliased
+to the gas field density (i.e. ``(gas, density)``); similarly, ``(flash, velx)``
+is aliased to ``(gas, velocity_x)``, and so on. In what follows, if a field
+is aliased it will be noted.
 
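+For example, both of the following field specifications refer to the same
+data in a FLASH dataset (the file name here is hypothetical):
+
+.. code-block:: python
+
+  import yt
+  ds = yt.load("sloshing_nomag2_hdf5_plt_cnt_0100")
+  ad = ds.all_data()
+  print ad["flash", "dens"]    # frontend field, in code units
+  print ad["gas", "density"]   # aliased yt field, in CGS units
+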
 Try using the ``ds.field_list`` and ``ds.derived_field_list`` to view the
 native and derived fields available for your dataset respectively. For example
@@ -23,46 +88,68 @@
 
 .. notebook-cell::
 
-  from yt.mods import *
-  ds = load("Enzo_64/DD0043/data0043")
+  import yt
+  ds = yt.load("Enzo_64/DD0043/data0043")
   for i in sorted(ds.field_list):
     print i
 
-.. note:: Universal fields will be overridden by a code-specific field.
+To figure out what all of the field types here mean, see
+:ref:`known-field-types`.
 
-.. rubric:: Table of Contents
-
-.. contents::
-   :depth: 2
+.. contents:: Table of Contents
+   :depth: 1
    :local:
    :backlinks: none
+
+.. _yt-fields:
+
+Universal Fields
+----------------
 """
 
+footer = """
+
+Index of Fields
+---------------
+
+.. contents:: 
+   :depth: 3
+   :backlinks: none
+
+"""
 print header
 
 seen = []
 
+def fix_units(units, in_cgs=False):
+    unit_object = Unit(units, registry=ds.unit_registry)
+    if in_cgs:
+        unit_object = unit_object.get_cgs_equivalent()
+    latex = unit_object.latex_representation()
+    return latex.replace('\/','~')
 
 def print_all_fields(fl):
     for fn in sorted(fl):
         df = fl[fn]
         f = df._function
-        cv = df._convert_function
-        if [f, cv] in seen:
-            continue
-        seen.append([f, cv])
-        print "%s" % (df.name)
-        print "+" * len(df.name)
+        s = "%s" % (df.name,)
+        print s
+        print "^" * len(s)
         print
-        if len(df._units) > 0:
-            print "   * Units: :math:`%s`" % (df._units)
-        if len(df._projected_units) > 0:
-            print "   * Projected Units: :math:`%s`" % (df._projected_units)
+        if len(df.units) > 0:
+            # Most universal fields are in CGS except for these special fields
+            if df.name[1] in ['particle_position', 'particle_position_x', \
+                         'particle_position_y', 'particle_position_z', \
+                         'entropy', 'kT', 'metallicity', 'dx', 'dy', 'dz',\
+                         'cell_volume', 'x', 'y', 'z']:
+                print "   * Units: :math:`%s`" % fix_units(df.units)
+            else:
+                print "   * Units: :math:`%s`" % fix_units(df.units, in_cgs=True)
         print "   * Particle Type: %s" % (df.particle_type)
         print
         print "**Field Source**"
         print
-        if islambda(f):
+        if f == NullFunc:
             print "No source available."
             print
             continue
@@ -72,66 +159,73 @@
             for line in inspect.getsource(f).split("\n"):
                 print "  " + line
             print
-        print "**Convert Function Source**"
-        print
-        if islambda(cv):
-            print "No source available."
-            print
-            continue
+
+ds.index
+print_all_fields(ds.field_info)
+
+def print_frontend_field(ftype, field, ptype):
+    name = field[0]
+    units = field[1][0]
+    aliases = ["``%s``" % f for f in field[1][1]]
+    if ftype != "particle_type":
+        ftype = "'"+ftype+"'"
+    s = "(%s, '%s')" % (ftype, name)
+    print s
+    print "^" * len(s)
+    print
+    if len(units) > 0:
+        print "   * Units: :math:`\mathrm{%s}`" % fix_units(units)
+    if len(aliases) > 0:
+        print "   * Aliased to: %s" % " ".join(aliases)
+    print "   * Particle Type: %s" % (ptype)
+    print
+
+current_frontends = [f for f in _frontends if f not in ["stream"]]
+
+for frontend in current_frontends:
+    this_f = getattr(frontends_module, frontend)
+    field_info_names = [fi for fi in dir(this_f) if "FieldInfo" in fi]
+    dataset_names = [dset for dset in dir(this_f) if "Dataset" in dset]
+
+    if frontend == "sph":
+        field_info_names = \
+          ['TipsyFieldInfo' if 'Tipsy' in d else 'SPHFieldInfo' for d in dataset_names]
+    elif frontend == "boxlib":
+        field_info_names = []
+        for d in dataset_names:
+            if "Maestro" in d:  
+                field_info_names.append("MaestroFieldInfo")
+            elif "Castro" in d: 
+                field_info_names.append("CastroFieldInfo")
+            else: 
+                field_info_names.append("BoxlibFieldInfo")
+
+    for dset_name, fi_name in zip(dataset_names, field_info_names):
+        fi = getattr(this_f, fi_name)
+        nfields = 0
+        if hasattr(fi, "known_other_fields"):
+            known_other_fields = fi.known_other_fields
+            nfields += len(known_other_fields)
         else:
-            print ".. code-block:: python"
-            print
-            for line in inspect.getsource(cv).split("\n"):
-                print "  " + line
-            print
+            known_other_fields = []
+        if hasattr(fi, "known_particle_fields"):
+            known_particle_fields = fi.known_particle_fields
+            if 'Tipsy' in fi_name:
+                known_particle_fields += tuple(fi.aux_particle_fields.values())
+            nfields += len(known_particle_fields)
+        else:
+            known_particle_fields = []
+        if nfields > 0:
+            print ".. _%s_specific_fields:\n" % dset_name.replace("Dataset", "")
+            h = "%s-Specific Fields" % dset_name.replace("Dataset", "")
+            print h
+            print "-" * len(h) + "\n"
+            for field in known_other_fields:
+                print_frontend_field(frontend, field, False)
+            for field in known_particle_fields:
+                if frontend in ["sph", "halo_catalogs", "sdf"]:
+                    print_frontend_field("particle_type", field, True)
+                else:
+                    print_frontend_field("io", field, True)
 
-
-print "Universal Field List"
-print "--------------------"
-print
-print_all_fields(FieldInfo)
-
-print "Enzo-Specific Field List"
-print "------------------------"
-print
-print_all_fields(EnzoFieldInfo)
-
-print "Orion-Specific Field List"
-print "-------------------------"
-print
-print_all_fields(OrionFieldInfo)
-
-print "FLASH-Specific Field List"
-print "-------------------------"
-print
-print_all_fields(FLASHFieldInfo)
-
-print "Athena-Specific Field List"
-print "--------------------------"
-print
-print_all_fields(AthenaFieldInfo)
-
-print "Nyx-Specific Field List"
-print "-----------------------"
-print
-print_all_fields(NyxFieldInfo)
-
-print "Chombo-Specific Field List"
-print "--------------------------"
-print
-print_all_fields(ChomboFieldInfo)
-
-print "Pluto-Specific Field List"
-print "--------------------------"
-print
-print_all_fields(PlutoFieldInfo)
-
-print "Grid-Data-Format-Specific Field List"
-print "------------------------------------"
-print
-print_all_fields(GDFFieldInfo)
-
-print "Generic-Format (Stream) Field List"
-print "----------------------------------"
-print
-print_all_fields(StreamFieldInfo)
+print footer

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -16,7 +16,7 @@
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
-BRANCH="yt-3.0" # This is the branch to which we will forcibly update.
+BRANCH="yt" # This is the branch to which we will forcibly update.
 
 if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
 then
@@ -500,13 +500,28 @@
     fi
     [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
     touch $LIB/extracted
+    BUILD_ARGS=""
+    case $LIB in
+        *h5py*)
+            BUILD_ARGS="--hdf5=${HDF5_DIR}"
+            ;;
+        *numpy*)
+            if [ -e ${DEST_DIR}/lib/python2.7/site-packages/numpy/__init__.py ]
+            then
+                VER=$(${DEST_DIR}/bin/python -c 'from distutils.version import StrictVersion as SV; \
+                                                 import numpy; print SV(numpy.__version__) < SV("1.8.0")')
+                if [ $VER == "True" ]
+                then
+                    echo "Removing previous NumPy instance (see issue #889)"
+                    rm -rf ${DEST_DIR}/lib/python2.7/site-packages/{numpy*,*.pth}
+                fi
+            fi
+            ;;
+        *)
+            ;;
+    esac
     cd $LIB
-    if [ ! -z `echo $LIB | grep h5py` ]
-    then
-	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    else
-        ( ${DEST_DIR}/bin/python2.7 setup.py build   $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
-    fi
+    ( ${DEST_DIR}/bin/python2.7 setup.py build ${BUILD_ARGS} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
     ( ${DEST_DIR}/bin/python2.7 setup.py install    2>&1 ) 1>> ${LOG_FILE} || do_exit
     touch done
     cd ..
@@ -580,56 +595,54 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
-CYTHON='Cython-0.19.1'
-FORTHON='Forthon-0.8.11'
+CYTHON='Cython-0.20.2'
 PYX='PyX-0.12.1'
-PYTHON='Python-2.7.6'
+PYTHON='Python-2.7.8'
 BZLIB='bzip2-1.0.6'
 FREETYPE_VER='freetype-2.4.12'
-H5PY='h5py-2.1.3'
+H5PY='h5py-2.3.1'
 HDF5='hdf5-1.8.11'
-IPYTHON='ipython-2.1.0'
+IPYTHON='ipython-2.2.0'
 LAPACK='lapack-3.4.2'
 PNG=libpng-1.6.3
-MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-3.0'
-NOSE='nose-1.3.0'
-NUMPY='numpy-1.7.1'
+MATPLOTLIB='matplotlib-1.4.0'
+MERCURIAL='mercurial-3.1'
+NOSE='nose-1.3.4'
+NUMPY='numpy-1.8.2'
 PYTHON_HGLIB='python-hglib-1.0'
-PYZMQ='pyzmq-13.1.0'
+PYZMQ='pyzmq-14.3.1'
 ROCKSTAR='rockstar-0.99.6'
-SCIPY='scipy-0.12.0'
+SCIPY='scipy-0.14.0'
 SQLITE='sqlite-autoconf-3071700'
-SYMPY='sympy-0.7.3'
-TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.4'
+SYMPY='sympy-0.7.5'
+TORNADO='tornado-4.0.1'
+ZEROMQ='zeromq-4.0.4'
 ZLIB='zlib-1.2.8'
 
 # Now we dump all our SHA512 files out.
-echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '118e3ebd76f50bda8187b76654e65caab2c2c403df9b89da525c2c963dedc7b38d898ae0b92d44b278731d969a891eb3f7b5bcc138cfe3e037f175d4c87c29ec  Cython-0.20.2.tar.gz' > Cython-0.20.2.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79  Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
+echo '4b05f0a490ddee37e8fc7970403bb8b72c38e5d173703db40310e78140d9d5c5732789d69c68dbd5605a623e4582f5b9671f82b8239ecdb34ad4261019dace6a  Python-2.7.8.tgz' > Python-2.7.8.tgz.sha512
 echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
 echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
-echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'f0da1d2ac855c02fb828444d719a1b23a580adb049335f3e732ace67558a125ac8cd3b3a68ac6bf9d10aa3ab19e4672b814eb28cc8c66910750c62efb655d744  h5py-2.3.1.tar.gz' > h5py-2.3.1.tar.gz.sha512
 echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '68c15f6402cacfd623f8e2b70c22d06541de3616fdb2d502ce93cd2fdb4e7507bb5b841a414a4123264221ee5ffb0ebefbb8541f79e647fcb9f73310b4c2d460  ipython-2.1.0.tar.gz' > ipython-2.1.0.tar.gz.sha512
+echo '4953bf5e9d6d5c6ad538d07d62b5b100fd86a37f6b861238501581c0059bd4655345ca05cf395e79709c38ce4cb9c6293f5d11ac0252a618ad8272b161140d13  ipython-2.2.0.tar.gz' > ipython-2.2.0.tar.gz.sha512
 echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
 echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
-echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo '8cd387ea0d74d5ed01b58d5ef8e3fb408d4b05f7deb45a02e34fbb931fd920aafbfcb3a9b52a027ebcdb562837198637a0e51f2121c94e0fcf7f7d8c016f5342  mercurial-3.0.tar.gz' > mercurial-3.0.tar.gz.sha512
-echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
-echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '60aa386639dec17b4f579955df60f2aa7c8ccd589b3490bb9afeb2929ea418d5d1a36a0b02b8d4a6734293076e9069429956c56cf8bd099b756136f2657cf9d4  matplotlib-1.4.0.tar.gz' > matplotlib-1.4.0.tar.gz.sha512
+echo '1ee2fe7a241bf81087e55d9e4ee8fa986f41bb0655d4828d244322c18f3958a1f3111506e2df15aefcf86100b4fe530fcab2d4c041b5945599ed3b3a889d50f5  mercurial-3.1.tar.gz' > mercurial-3.1.tar.gz.sha512
+echo '19499ab08018229ea5195cdac739d6c7c247c5aa5b2c91b801cbd99bad12584ed84c5cfaaa6fa8b4893a46324571a2f8a1988a1381f4ddd58390e597bd7bdc24  nose-1.3.4.tar.gz' > nose-1.3.4.tar.gz.sha512
+echo '996e6b8e2d42f223e44660f56bf73eb8ab124f400d89218f8f5e4d7c9860ada44a4d7c54526137b0695c7a10f36e8834fbf0d42b7cb20bcdb5d5c245d673385c  numpy-1.8.2.tar.gz' > numpy-1.8.2.tar.gz.sha512
 echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
-echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
-echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '3d93a8fbd94fc3f1f90df68257cda548ba1adf3d7a819e7a17edc8681894003ac7ae6abd319473054340c11443a6a3817b931366fd7dae78e3807d549c544f8b  pyzmq-14.3.1.tar.gz' > pyzmq-14.3.1.tar.gz.sha512
+echo 'ad1278740c1dc44c5e1b15335d61c4552b66c0439325ed6eeebc5872a1c0ba3fce1dd8509116b318d01e2d41da2ee49ec168da330a7fafd22511138b29f7235d  scipy-0.14.0.tar.gz' > scipy-0.14.0.tar.gz.sha512
 echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
-echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
-echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6  zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
+echo '8a46e75abc3ed2388b5da9cb0e5874ae87580cf3612e2920b662d8f8eee8047efce5aa998eee96661d3565070b1a6b916c8bed74138b821f4e09115f14b6677d  sympy-0.7.5.tar.gz' > sympy-0.7.5.tar.gz.sha512
+echo 'a4e0231e77ebbc2885bab648b292b842cb15c84d66a1972de18cb00fcc611eae2794b872f070ab7d5af32dd0c6c1773527fe1332bd382c1821e1f2d5d76808fb  tornado-4.0.1.tar.gz' > tornado-4.0.1.tar.gz.sha512
+echo '7d70855d0537971841810a66b7a943a88304f6991ce445df19eea034aadc53dbce9d13be92bf44cfef1f3e19511a754eb01006a3968edc1ec3d1766ea4730cda  zeromq-4.0.4.tar.gz' > zeromq-4.0.4.tar.gz.sha512
 echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
 [ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -653,7 +666,6 @@
 get_ytproject $H5PY.tar.gz
 get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject $FORTHON.tar.gz
 get_ytproject $NOSE.tar.gz
 get_ytproject $PYTHON_HGLIB.tar.gz
 get_ytproject $SYMPY.tar.gz
@@ -729,7 +741,7 @@
         cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
-		( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -932,7 +944,6 @@
 do_setup_py $IPYTHON
 do_setup_py $H5PY
 do_setup_py $CYTHON
-do_setup_py $FORTHON
 do_setup_py $NOSE
 do_setup_py $PYTHON_HGLIB
 do_setup_py $SYMPY
@@ -1026,7 +1037,7 @@
     echo
     echo "To get started with yt, check out the orientation:"
     echo
-    echo "    http://yt-project.org/doc/bootcamp/"
+    echo "    http://yt-project.org/doc/quickstart/"
     echo
     echo "The source for yt is located at:"
     echo "    $YT_DIR"

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/_static/custom.css
--- a/doc/source/_static/custom.css
+++ b/doc/source/_static/custom.css
@@ -5,4 +5,97 @@
 
 dd {
     margin-left: 30px;
-}
\ No newline at end of file
+}
+
+/*
+
+Collapse the navbar when its width is less than 1200 pixels.  This may need to
+be adjusted if the navbar menu changes.
+
+*/
+
+@media (max-width: 1200px) {
+    .navbar-header {
+        float: none;
+    }
+    .navbar-toggle {
+        display: block;
+    }
+    .navbar-collapse {
+        border-top: 1px solid transparent;
+        box-shadow: inset 0 1px 0 rgba(255,255,255,0.1);
+    }
+    .navbar-collapse.collapse {
+        display: none!important;
+    }
+    .navbar-nav {
+        float: none!important;
+        margin: 7.5px -15px;
+    }
+    .navbar-nav>li {
+        float: none;
+    }
+    .navbar-nav>li>a {
+        padding-top: 10px;
+        padding-bottom: 10px;
+    }
+    /* since 3.1.0 */
+    .navbar-collapse.collapse.in { 
+        display: block!important;
+    }
+    .collapsing {
+        overflow: hidden!important;
+    }
+}
+
+/* 
+
+Sphinx code literals conflict with the notebook code tag, so we special-case
+literals that are inside text.
+
+*/
+
+p code {
+    color:  #d14;    
+    white-space: nowrap;
+    font-size: 90%;
+    background-color: #f9f2f4;
+    font-family: Menlo, Monaco, Consolas, 'Courier New', monospace;
+}
+
+/*
+
+Nicer, controllable formatting for tables that have multi-line headers.
+
+*/
+
+th.head {
+    white-space: pre;
+}
+
+/*
+
+labels have a crappy default color that is almost invisible in our doc theme so
+we use a darker color.
+
+*/
+
+.label {
+    color: #333333;
+}
+
+/*
+
+Hack to prevent internal link targets being positioned behind the navbar.
+
+See: https://github.com/twbs/bootstrap/issues/1768
+
+*/
+
+*[id]:before :not(p) {
+  display: block; 
+  content: " "; 
+  margin-top: -45px; 
+  height: 45px; 
+  visibility: hidden; 
+}

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
--- a/doc/source/analyzing/analysis_modules/Halo_Analysis.ipynb
+++ /dev/null
@@ -1,412 +0,0 @@
-{
- "metadata": {
-  "name": "",
-  "signature": "sha256:c423bcb9e3370a4581cbaaa8e764b95ec13e665aa3b46d452891d76cc79d7acf"
- },
- "nbformat": 3,
- "nbformat_minor": 0,
- "worksheets": [
-  {
-   "cells": [
-    {
-     "cell_type": "heading",
-     "level": 1,
-     "metadata": {},
-     "source": [
-      "Full Halo Analysis"
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Creating a Catalog"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Here we put everything together to perform some realistic analysis. First we load a full simulation dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "import yt\n",
-      "from yt.analysis_modules.halo_analysis.api import *\n",
-      "import tempfile\n",
-      "import shutil\n",
-      "import os\n",
-      "\n",
-      "# Create temporary directory for storing files\n",
-      "tmpdir = tempfile.mkdtemp()\n",
-      "\n",
-      "# Load the data set with the full simulation information\n",
-      "data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we load a rockstar halos binary file. This is the output from running the rockstar halo finder on the dataset loaded above. It is also possible to require the HaloCatalog to find the halos in the full simulation dataset at runtime by specifying a `finder_method` keyword."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Load the rockstar data files\n",
-      "halos_ds = yt.load('rockstar_halos/halos_0.0.bin')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "From these two loaded datasets we create a halo catalog object. No analysis is done at this point, we are simply defining an object we can add analysis tasks to. These analysis tasks will be run in the order they are added to the halo catalog object."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Instantiate a catalog using those two paramter files\n",
-      "hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds, \n",
-      "                 output_dir=os.path.join(tmpdir, 'halo_catalog'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The first analysis task we add is a filter for the most massive halos; those with masses great than $10^{14}~M_\\odot$. Note that all following analysis will only be performed on these massive halos and we will not waste computational time calculating quantities for halos we are not interested in. This is a result of adding this filter first. If we had called `add_filter` after some other `add_quantity` or `add_callback` to the halo catalog, the quantity and callback calculations would have been performed for all halos, not just those which pass the filter."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": true,
-     "input": [
-      "# Filter out less massive halos\n",
-      "hc.add_filter(\"quantity_value\", \"particle_mass\", \">\", 1e14, \"Msun\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Finding Radial Profiles"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Our first analysis goal is going to be constructing radial profiles for our halos. We would like these profiles to be in terms of the virial radius. Unfortunately we have no guarantee that values of center and virial radius recorded by the halo finder are actually physical. Therefore we should recalculate these quantities ourselves using the values recorded by the halo finder as a starting point."
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The first step is going to be creating a sphere object that we will create radial profiles along. This attaches a sphere data object to every halo left in the catalog."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# attach a sphere object to each halo whose radius extends to twice the radius of the halo\n",
-      "hc.add_callback(\"sphere\", factor=2.0)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Next we find the radial profile of the gas overdensity along the sphere object in order to find the virial radius. `radius` is the axis along which we make bins for the radial profiles. `[(\"gas\",\"overdensity\")]` is the quantity that we are profiling. This is a list so we can profile as many quantities as we want. The `weight_field` indicates how the cells should be weighted, but note that this is not a list, so all quantities will be weighted in the same way. The `accumulation` keyword indicates if the profile should be cummulative; this is useful for calculating profiles such as enclosed mass. The `storage` keyword indicates the name of the attribute of a halo where these profiles will be stored. Setting the storage keyword to \"virial_quantities_profiles\" means that the profiles will be stored in a dictionary that can be accessed by `halo.virial_quantities_profiles`."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# use the sphere to calculate radial profiles of gas density weighted by cell volume in terms of the virial radius\n",
-      "hc.add_callback(\"profile\", x_field=\"radius\",\n",
-      "                y_fields=[(\"gas\", \"overdensity\")],\n",
-      "                weight_field=\"cell_volume\", \n",
-      "                accumulation=False,\n",
-      "                storage=\"virial_quantities_profiles\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now we calculate the virial radius of halo using the sphere object. As this is a callback, not a quantity, the virial radius will not be written out with the rest of the halo properties in the final halo catalog. This also has a `profile_storage` keyword to specify where the radial profiles are stored that will allow the callback to calculate the relevant virial quantities. We supply this keyword with the same string we gave to `storage` in the last `profile` callback."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Define a virial radius for the halo.\n",
-      "hc.add_callback(\"virial_quantities\", [\"radius\"], \n",
-      "                profile_storage = \"virial_quantities_profiles\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we have calculated the virial radius, we delete the profiles we used to find it."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "hc.add_callback('delete_attribute','virial_quantities_profiles')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Now that we have calculated virial quantities we can add a new sphere that is aware of the virial radius we calculated above."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "hc.add_callback('sphere', radius_field='radius_200', factor=5,\n",
-      "                field_parameters=dict(virial_radius=('quantity', 'radius_200')))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Using this new sphere, we calculate a gas temperature profile along the virial radius, weighted by the cell mass."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "hc.add_callback('profile', 'virial_radius', [('gas','temperature')],\n",
-      "                storage='virial_profiles',\n",
-      "                weight_field='cell_mass', \n",
-      "                accumulation=False, output_dir='profiles')\n"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "As profiles are not quantities they will not automatically be written out in the halo catalog; thus in order to be reloadable we must write them out explicitly through a callback of `save_profiles`. This makes sense because they have an extra dimension for each halo along the profile axis. "
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "# Save the profiles\n",
-      "hc.add_callback(\"save_profiles\", storage=\"virial_profiles\", output_dir=\"profiles\")"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We then create the halo catalog. Remember, no analysis is done before this call to create. By adding callbacks and filters we are simply queuing up the actions we want to take that will all run now."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": true,
-     "input": [
-      "hc.create()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Reloading HaloCatalogs"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Finally we load these profiles back in and make a pretty plot. It is not strictly necessary to reload the profiles in this notebook, but we show this process here to illustrate that this step may be performed completely separately from the rest of the script. This workflow allows you to create a single script that will allow you to perform all of the analysis that requires the full dataset. The output can then be saved in a compact form where only the necessarily halo quantities are stored. You can then download this smaller dataset to a local computer and run any further non-computationally intense analysis and design the appropriate plots."
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "We can load a previously saved halo catalog by using the `load` command. We then create a `HaloCatalog` object from just this dataset."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "halos_ds =  yt.load(os.path.join(tmpdir, 'halo_catalog/halo_catalog.0.h5'))\n",
-      "\n",
-      "hc_reloaded = HaloCatalog(halos_ds=halos_ds,\n",
-      "                          output_dir=os.path.join(tmpdir, 'halo_catalog'))"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      " Just as profiles are saved seperately throught the `save_profiles` callback they also must be loaded separately using the `load_profiles` callback."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "hc_reloaded.add_callback('load_profiles', storage='virial_profiles',\n",
-      "                         output_dir='profiles')"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Calling `load` is the equivalent of calling `create` earlier, but defaults to to not saving new information. This means that the callback to `load_profiles` is not run until we call `load` here."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": true,
-     "input": [
-      "hc_reloaded.load()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
-      "Plotting Radial Profiles"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "In the future ProfilePlot will be able to properly interpret the loaded profiles of `Halo` and `HaloCatalog` objects, but this functionality is not yet implemented. In the meantime, we show a quick method of viewing a profile for a single halo."
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "The individual `Halo` objects contained in the `HaloCatalog` can be accessed through the `halo_list` attribute. This gives us access to the dictionary attached to each halo where we stored the radial profiles."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "halo = hc_reloaded.halo_list[0]\n",
-      "\n",
-      "radius = halo.virial_profiles['virial_radius']\n",
-      "temperature = halo.virial_profiles[u\"('gas', 'temperature')\"]\n",
-      "\n",
-      "# Remove output files, that are no longer needed\n",
-      "shutil.rmtree(tmpdir)"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Here we quickly use matplotlib to create a basic plot of the radial profile of this halo. When `ProfilePlot` is properly configured to accept Halos and HaloCatalogs the full range of yt plotting tools will be accessible."
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "%matplotlib inline\n",
-      "import matplotlib.pyplot as plt\n",
-      "import numpy as np\n",
-      "\n",
-      "plt.plot(np.array(radius), np.array(temperature))\n",
-      "\n",
-      "plt.semilogy()\n",
-      "plt.xlabel(r'$\\rm{R/R_{vir}}$')\n",
-      "plt.ylabel(r'$\\rm{Temperature\\/\\/(K)}$')\n",
-      "\n",
-      "plt.show()"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    }
-   ],
-   "metadata": {}
-  }
- ]
-}
\ No newline at end of file

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
--- a/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
+++ b/doc/source/analyzing/analysis_modules/Particle_Trajectories.ipynb
@@ -299,7 +299,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, `yt` will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
+      "Suppose we wanted to know the gas density along the particle trajectory, but there wasn't a particle field corresponding to that in our dataset. Never fear! If the field exists as a grid field, yt will interpolate this field to the particle positions and add the interpolated field to the trajectory. To add such a field (or any field, including additional particle fields) we can call the `add_fields` method:"
      ]
     },
     {
@@ -354,4 +354,4 @@
    "metadata": {}
   }
  ]
-}
\ No newline at end of file
+}

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/SZ_projections.ipynb
--- a/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
+++ b/doc/source/analyzing/analysis_modules/SZ_projections.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:e4db171b795d155870280ddbe8986f55f9a94ffb10783abf9d4cc2de3ec24894"
+  "signature": "sha256:2cc168b2c1737c67647aa29892c0213e7a58233fa53c809f9cd975a4306e9bc8"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -21,7 +21,7 @@
       "incorporating all of these effects, there is a library:\n",
       "SZpack ([Chluba et al 2012](http://adsabs.harvard.edu/abs/2012MNRAS.426..510C)). \n",
       "\n",
-      "The `sunyaev_zeldovich` analysis module in `yt` makes it possible\n",
+      "The `sunyaev_zeldovich` analysis module in yt makes it possible\n",
       "to make projections of the full S-Z signal given the properties of the\n",
       "thermal gas in the simulation using SZpack. SZpack has several different options for computing the S-Z signal, from full\n",
       "integrations to very good approximations.  Since a full or even a\n",
@@ -43,7 +43,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "`yt` makes projections of the various moments needed for the\n",
+      "yt makes projections of the various moments needed for the\n",
       "calculation, and then the resulting projected fields are used to\n",
       "compute the S-Z signal. In our implementation, the expansion is carried out to first-order\n",
       "terms in $T_e$ and zeroth-order terms in $\\beta_{c,\\parallel}$ by default, but terms up to second-order in can be optionally\n",
@@ -81,7 +81,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Once you have SZpack installed, making S-Z projections from ``yt``\n",
+      "Once you have SZpack installed, making S-Z projections from yt\n",
       "datasets is fairly straightforward:"
      ]
     },
@@ -89,6 +89,7 @@
      "cell_type": "code",
      "collapsed": false,
      "input": [
+      "%matplotlib inline\n",
       "import yt\n",
       "from yt.analysis_modules.sunyaev_zeldovich.api import SZProjection\n",
       "\n",
@@ -142,7 +143,7 @@
       "create images of the projected S-Z signal at each requested frequency,\n",
       "which can be accessed dict-like from the projection object (e.g.,\n",
       "`szprj[\"90_GHz\"]`). Projections of other quantities may also be\n",
-      "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard ``yt``\n",
+      "accessed; to see what fields are available call `szprj.keys()`. The methods also accept standard yt\n",
       "keywords for projections such as `center`, `width`, and `source`. The image buffer size can be controlled by setting `nx`.  \n"
      ]
     },

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/absorption_spectrum.rst
--- a/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
+++ b/doc/source/analyzing/analysis_modules/absorption_spectrum.rst
@@ -1,7 +1,8 @@
 .. _absorption_spectrum:
 
-Making an Absorption Spectrum
-=============================
+Absorption Spectrum
+===================
+
 .. sectionauthor:: Britton Smith <brittonsmith@gmail.com>
 
 Absorption line spectra, such as shown below, can be made with data created by the 
@@ -10,8 +11,9 @@
 of the ray through the cell.  Line profiles are generated using a voigt profile based 
 on the temperature field.  The lines are then shifted according to the redshift 
 recorded by the light ray tool and (optionally) the line of sight peculiar velocity.  
-Inclusion of the peculiar velocity requires setting **get_los_velocity** to True in 
-the call to :meth:`make_light_ray`.
+Inclusion of the peculiar velocity requires setting ``get_los_velocity`` to True in 
+the call to 
+:meth:`~yt.analysis_modules.cosmological_observation.light_ray.light_ray.LightRay.make_light_ray`.
 
 The spectrum generator will output a file containing the wavelength and normalized flux.  
 It will also output a text file listing all important lines.
@@ -58,7 +60,7 @@
   sp.add_line(my_label, field, wavelength, f_value, gamma, mass, label_threshold=1.e10)
 
 In the above example, the *field* argument tells the spectrum generator which field from the 
-ray data to use to calculate the column density.  The **label_threshold** keyword tells the 
+ray data to use to calculate the column density.  The ``label_threshold`` keyword tells the 
 spectrum generator to add all lines above a column density of 10 :superscript:`10` 
 cm :superscript:`-2` to the text line list.  If None is provided, as is the default, no 
 lines of this type will be added to the text list.
@@ -89,16 +91,279 @@
                                       use_peculiar_velocity=True)
 
 A spectrum will be made using the specified ray data and the wavelength and flux arrays 
-will also be returned.  If **use_peculiar_velocity** is set to False, the lines will only 
+will also be returned.  If ``use_peculiar_velocity`` is set to False, the lines will only 
 be shifted according to the redshift.
 
-Three output file formats are supported for writing out the spectrum: fits, hdf5, and ascii.  
-The file format used is based on the extension provided in the **output_file** keyword: '.fits' 
-for a fits file, '.h5' for an hdf5 file, and anything else for an ascii file.
+Three output file formats are supported for writing out the spectrum: fits, 
+hdf5, and ascii.  The file format used is based on the extension provided 
+in the ``output_file`` keyword: ``.fits`` for a fits file, 
+``.h5`` for an hdf5 file, and anything else for an ascii file.
 
 .. note:: To write out a fits file, you must install the `pyfits <http://www.stsci.edu/resources/software_hardware/pyfits>`_ module.
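+For example, to write the spectrum to an hdf5 file (the file names here
+are hypothetical):
+
+.. code-block:: python
+
+    wavelength, flux = sp.make_spectrum("lightray.h5",
+                                        output_file="spectrum.h5",
+                                        use_peculiar_velocity=True)
+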
 
-What can I do with this?
-------------------------
+Fitting an Absorption Spectrum
+------------------------------
 
-Try :ref:`quick_start_fitting`
+.. sectionauthor:: Hilary Egan <hilary.egan@colorado.edu>
+
+This tool can be used to fit absorption spectra, particularly those
+generated using the ``AbsorptionSpectrum`` tool. For more details
+on its uses and implementation, please see `Egan et al. (2013)
+<http://arxiv.org/abs/1307.2244>`_. If you find this tool useful, we
+encourage you to cite accordingly.
+
+Loading an Absorption Spectrum
+------------------------------
+
+To load an absorption spectrum created by
+:class:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum.AbsorptionSpectrum`,
+we specify the output file name. It is advisable to save the spectrum as
+an ``.h5`` or ``.fits`` file rather than as ascii, since rounding errors
+introduced when saving to an ascii file will negatively impact fit quality.
+
+.. code-block:: python
+
+    import h5py
+
+    f = h5py.File('spectrum.h5', 'r')
+    wavelength = f['wavelength'][:]
+    flux = f['flux'][:]
+    f.close()
+
+Specifying Species Properties
+-----------------------------
+
+Before fitting a spectrum, you must specify the properties of all the 
+species included when generating the spectrum.
+
+The physical properties needed for each species are the rest wavelength,
+f-value, gamma value, and atomic mass. These will be the same values
+as used to generate the initial absorption spectrum. These values are
+given in list form as some species generate multiple lines (as in the
+OVI doublet). The number of lines is also specified on its own.
+
+To fine tune the fitting procedure and give results in a minimal
+number of optimizing steps, we specify expected maximum and minimum
+values for the column density, doppler parameter, and redshift. These 
+values can be well outside the range of expected values for a typical line
+and are mostly to prevent the algorithm from fitting to negative values
+or becoming numerically unstable.
+
+Common initial guesses for doppler parameter and column density should also
+be given. These values will not affect the specific values generated by
+the fitting algorithm, provided they are in a reasonably appropriate range
+(i.e., within the range given by the max and min values for the parameter).
+
+For a spectrum containing both the H Lyman-alpha line and the OVI doublet,
+we set up a fit as shown below.
+
+.. code-block:: python
+
+    HI_parameters = {'name':'HI',
+            'f': [.4164],
+            'Gamma':[6.265E8],
+            'wavelength':[1215.67],
+            'numLines':1,
+            'maxN': 1E22, 'minN':1E11,
+            'maxb': 300, 'minb':1,
+            'maxz': 6, 'minz':0,
+            'init_b':30,
+            'init_N':1E14}
+
+    OVI_parameters = {'name':'OVI',
+            'f':[.1325,.06580],
+            'Gamma':[4.148E8,4.076E8],
+            'wavelength':[1031.9261,1037.6167],
+            'numLines':2,
+            'maxN':1E17,'minN':1E11,
+            'maxb':300, 'minb':1,
+            'maxz':6, 'minz':0,
+            'init_b':20,
+            'init_N':1E12}
+
+    speciesDicts = {'HI':HI_parameters,'OVI':OVI_parameters}
+
+
+Generating Fit of Spectrum
+--------------------------
+
+After loading a spectrum and specifying the properties of the species
+used to generate the spectrum, an appropriate fit can be generated.
+
+.. code-block:: python
+
+    orderFits = ['OVI','HI']
+
+    fitted_lines, fitted_flux = generate_total_fit(wavelength,
+        flux, orderFits, speciesDicts)
+
+The ``orderFits`` variable determines the order in which the species
+are fitted. This may affect the outcome of the fit, as lines may be
+fit as an incorrect species. For best results, it is recommended to
+fit species that generate multiple lines first, as a fit will only be
+accepted if all of the lines are fit appropriately using a single set
+of parameters. At the moment no cross correlation between lines of
+different species is performed.
+
+The parameters of the lines that are needed to fit the spectrum are contained
+in the ``fitted_lines`` variable. Each species given in ``orderFits`` will
+be a key in the ``fitted_lines`` dictionary. The entry for each species
+key will be another dictionary containing entries for 'N', 'b', 'z', and
+'group#', which are the column density, doppler parameter, redshift,
+and associated line complex, respectively. The i :superscript:`th` line
+of a given species is then given by the parameters ``N[i]``, ``b[i]``,
+and ``z[i]`` and is part of the same complex (and was fitted at the same time)
+as all lines with the same group number as ``group#[i]``.
+
+The ``fitted_flux`` is an ndarray of the same size as ``flux`` and 
+``wavelength`` that contains the cumulative absorption spectrum generated
+by the lines contained in ``fitted_lines``.
+
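+As a minimal sketch (using the ``speciesDicts`` example above), the fitted
+parameters for each species can be inspected like this:
+
+.. code-block:: python
+
+    # Each species key maps to a dictionary of parameter arrays.
+    HI_fit = fitted_lines['HI']
+    for i in range(len(HI_fit['N'])):
+        print HI_fit['N'][i], HI_fit['b'][i], HI_fit['z'][i], HI_fit['group#'][i]
+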
+Saving a Spectrum Fit
+---------------------
+
+Saving the results of a fitted spectrum for further analysis is
+accomplished automatically using the h5 file format. A group
+is made for each species that is fit, and each species group has
+a group for the corresponding N, b, z, and group# values.
+
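+A minimal sketch of reading these values back with h5py (the file name
+here is hypothetical):
+
+.. code-block:: python
+
+    import h5py
+
+    f = h5py.File('spectrum_fit.h5', 'r')
+    # One group per fitted species, each holding the N, b, z, and
+    # group# values for that species.
+    for species in f.keys():
+        print species, f[species].keys()
+    f.close()
+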
+.. _fitting_procedure:
+
+Procedure for Generating Fits
+-----------------------------
+
+.. sectionauthor:: Hilary Egan <hilary.egan@colorado.edu>
+
+To generate a fit for a spectrum 
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit` 
+is called.
+This function controls the identification of line complexes, the fit
+of a series of absorption lines for each appropriate species, checks of
+those fits, and returns the results of the fits.
+
+Finding Line Complexes
+----------------------
+
+Line complexes are found using the 
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.find_complexes`
+function. The process by which line complexes are found involves walking through
+the array of flux in order from minimum to maximum wavelength, and finding
+series of spatially contiguous cells whose flux is less than some limit.
+These regions are then checked in terms of an additional flux limit and size.
+The bounds of all the passing regions are then listed and returned. Those
+bounds that cover an exceptionally large region of wavelength space will be
+broken up if a suitable cut point is found. This method is only appropriate
+for noiseless spectra.
+
+The optional parameter ``complexLim`` (default = 0.999) controls the limit
+that triggers the identification of a spatially contiguous region of flux
+that could be a line complex. This number should be very close to 1 but not
+exactly equal. It should also be at least an order of magnitude closer to 1
+than the later discussed ``fitLim`` parameter, because a line complex where
+the flux of the trough is very close to the flux of the edge can be incredibly
+unstable when optimizing.
+
+The ``fitLim`` parameter controls the maximum flux that the trough
+of the region can have and still be considered a line complex. This 
+effectively controls the sensitivity to very low column absorbers. Default
+value is ``fitLim`` = 0.99. If a region is identified where the flux of the trough
+is greater than this value, the region is simply ignored.
+
+The ``minLength`` parameter controls the minimum number of array elements 
+that an identified region must have. This value must be greater than or
+equal to 3 as there are a minimum of 3 free parameters that must be fit.
+Default is ``minLength`` = 3.
+
+The ``maxLength`` parameter controls the maximum number of array elements
+that an identified region can have before it is split into separate regions.
+Default is ``maxLength`` = 1000. This should be adjusted based on the 
+resolution of the spectrum to remain appropriate. The default value corresponds
+to a wavelength range of roughly 50 angstroms.
+
+The ``splitLim`` parameter controls how exceptionally large regions are split.
+When such a region is identified by having more array elements than
+``maxLength``, the point of maximum flux (or minimum absorption) in the 
+middle two quartiles is identified. If that point has a flux greater than
+or equal to ``splitLim``, then two separate complexes are created: one from
+the lower wavelength edge to the minimum absorption point and the other from
+the minimum absorption point to the higher wavelength edge. The default
+value is ``splitLim`` = .99, but it should not drastically affect results, so
+long as the value is reasonably close to 1.
+
+Fitting a Line Complex
+----------------------
+
+After a complex is identified, it is fitted by iteratively adding and 
+optimizing a set of Voigt Profiles for a particular species until the
+region is considered successfully fit. The optimizing is accomplished
+using scipy's least squares optimizer. This requires an initial estimate
+of the parameters to be fit (column density, b-value, redshift) for each
+line.
+
+Each time a line is added, the guess of the parameters is based on
+the difference between the line complex and the fit so far. For the first line
+this just means the initial guess is based solely on the flux of the line
+complex. The initial column density is taken from the species parameters
+dictionary. If the line is saturated (some portion of the flux with a value
+less than .1), then the larger initial column density guess is chosen. If
+the flux is relatively high (all values > .9), then the smaller initial
+guess is given. These values are chosen to make optimization
+faster and more stable by being closer to the actual value, but the final
+results of fitting should not depend on them as they merely provide a
+starting point. 
+
+After the parameters for a line are optimized for the first time, the 
+optimized parameters are then used for the initial guess on subsequent 
+iterations with more lines. 
+
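+To illustrate the underlying optimization step (a generic sketch, not the
+module's internal code), a single absorption line can be fit with scipy's
+least squares optimizer as follows:
+
+.. code-block:: python
+
+    import numpy as np
+    from scipy.optimize import leastsq
+
+    def residuals(p, x, y):
+        # p = (depth, center, width); a simple Gaussian stand-in for
+        # the Voigt profile used by the actual fitter.
+        return y - (1.0 - p[0] * np.exp(-((x - p[1]) / p[2]) ** 2))
+
+    x = np.linspace(1214.0, 1217.0, 200)
+    y = 1.0 - 0.8 * np.exp(-((x - 1215.67) / 0.3) ** 2)
+    p0 = (0.5, 1215.5, 0.2)  # initial guess, as described above
+    p_opt, ier = leastsq(residuals, p0, args=(x, y))
+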
+The complex is considered successfully fit when the sum of the squares of 
+the difference between the flux generated from the fit and the desired flux
+profile is less than ``errBound``. ``errBound`` is related to ``maxAvgError``,
+the optional parameter to
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.generate_total_fit`,
+by the number of array elements in the region such that
+``errBound`` = number of elements * ``maxAvgError``.
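+
+In code form the acceptance test is roughly the following (a sketch;
+``fit_flux`` and ``region_flux`` are hypothetical names for the model and
+target flux over the region):
+
+.. code-block:: python
+
+   import numpy as np
+
+   errBound = region_flux.size * maxAvgError
+   fit_is_good = np.sum((fit_flux - region_flux)**2) < errBound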
+
+There are several other conditions under which the cycle of adding and 
+optimizing lines will halt. If the error of the optimized fit from adding
+a line is an order of magnitude worse than the error of the fit without
+that line, then it is assumed that the fitting has become unstable and 
+the latest line is removed. Lines are also prevented from being added if
+the total number of lines is greater than the number of elements in the flux
+array being fit divided by 3. This is because there must not be more free
+parameters in a fit than the number of points to constrain them. 
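+
+Schematically, these extra halting conditions amount to checks like the
+following (all names hypothetical):
+
+.. code-block:: python
+
+   # an added line made the error an order of magnitude worse: back it out
+   if new_error > 10 * previous_error:
+       lines.pop()
+
+   # never allow more free parameters (3 per line) than data points
+   if 3 * len(lines) > region_flux.size:
+       stop_adding_lines = True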
+
+Checking Fit Results
+--------------------
+
+After an acceptable fit for a region is determined, there are several steps
+the algorithm must go through to validate the fits. 
+
+First, the parameters must be in a reasonable range. This is a check to make 
+sure that the optimization did not become unstable and generate a fit that
+diverges wildly outside the region where the fit was performed. This way, even
+if a particular complex cannot be fit, the rest of the spectrum fitting still
+behaves as expected. The range of acceptability for each parameter is given
+in the species parameter dictionary. These are merely broad limits that will
+prevent numerical instability rather than physical limits.
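+
+A schematic version of this sanity check (hypothetical names; the bounds
+come from the species parameter dictionary):
+
+.. code-block:: python
+
+   def params_in_range(params, bounds):
+       # reject fits whose parameters escaped the broad stability limits
+       return all(lo <= p <= hi for p, (lo, hi) in zip(params, bounds))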
+
+In cases where a single species generates multiple lines (as in the OVI 
+doublet), the fits are then checked for higher wavelength lines. Initially,
+the fits are generated considering only the lowest wavelength line fit to a region.
+This is because we perform the fitting of complexes in order from the lowest
+wavelength to the highest, so any contribution to a complex being fit must
+come from the lower wavelength as the higher wavelength contributions would
+already have been subtracted out after fitting the lower wavelength. 
+
+Saturated Lyman Alpha Fitting Tools
+-----------------------------------
+
+In cases where a large or saturated line (there exists a point in the complex
+where the flux is less than 0.1) fails to be fit properly on the first pass, a
+more robust set of fitting tools is used to try to remedy the situation.
+The basic approach is to simply try a much wider range of initial parameter
+guesses in order to find the true optimization minimum, rather than getting
+stuck in a local minimum. A set of hard-coded initial parameter guesses
+for Lyman alpha lines is given by the function 
+:func:`~yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit.get_test_lines`.
+Also included in these parameter guesses is an initial guess of a high
+column cool line overlapping a lower column warm line, indicative of a 
+broad Lyman alpha (BLA) absorber.
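+
+In outline, the retry strategy looks like this (``fit_region`` and
+``test_guesses`` are hypothetical stand-ins for the optimizer call and the
+hard-coded guesses):
+
+.. code-block:: python
+
+   best_params, best_error = None, float('inf')
+   for guess in test_guesses:
+       params, error = fit_region(region, guess)
+       if error < best_error:
+           best_params, best_error = params, error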

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/clump_finding.rst
--- a/doc/source/analyzing/analysis_modules/clump_finding.rst
+++ b/doc/source/analyzing/analysis_modules/clump_finding.rst
@@ -13,8 +13,8 @@
 the result of user-specified functions, such as checking for gravitational 
 boundedness.  A sample recipe can be found in :ref:`cookbook-find_clumps`.
 
-The clump finder requires a data container and a field over which the 
-contouring is to be performed.
+The clump finder requires a data object (see :ref:`data-objects`) and a field 
+over which the contouring is to be performed.
 
 .. code:: python
 

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
--- a/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/ellipsoid_analysis.rst
@@ -4,7 +4,7 @@
 =======================
 .. sectionauthor:: Geoffrey So <gso at physics.ucsd.edu>
 
-.. warning:: This is my first attempt at modifying the ``yt`` source code,
+.. warning:: This is my first attempt at modifying the yt source code,
   so the program may be bug-ridden.  Please send yt-dev an email addressed
   to Geoffrey So if you discover something wrong with this
    portion of the code.
@@ -12,7 +12,7 @@
 Purpose
 -------
 
-The purpose of creating this feature in ``yt`` is to analyze field
+The purpose of creating this feature in yt is to analyze field
 properties that surround dark matter haloes.  Originally, this was
 usually done with the sphere 3D container, but since many halo
 particles are linked together in a more elongated shape, I thought it
@@ -55,10 +55,10 @@
 ~~~~~~~~~~~~
 .. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.halo_finding.api import *
 
-  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
   halo_list = parallelHF(ds)
   halo_list.dump('MyHaloList')
 
@@ -66,10 +66,10 @@
 ~~~~~~~~~~~~~~~~~~~~
 .. code-block:: python
 
-  from yt.mods import *
+  import yt
   from yt.analysis_modules.halo_finding.api import *
 
-  ds=load('Enzo_64/RD0006/RedshiftOutput0006')
+  ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
   haloes = LoadHaloes(ds, 'MyHaloList')
 
 Once the halo information is saved you can load it into the data
@@ -81,12 +81,12 @@
 
 This will return 6 items
 
-  #. The center of mass as an array.
-  #. A as a float.  (Must have A>=B)
-  #. B as a float.  (Must have B>=C)
-  #. C as a float.  (Must have C > cell size)
-  #. e0 vector as an array.  (now normalized automatically in the code)
-  #. tilt as a float.
+#. The center of mass as an array.
+#. A as a float.  (Must have A>=B)
+#. B as a float.  (Must have B>=C)
+#. C as a float.  (Must have C > cell size)
+#. e0 vector as an array.  (now normalized automatically in the code)
+#. tilt as a float.
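+
+For instance, the six values can be unpacked directly (a sketch, assuming
+the ``get_ellipsoid_parameters`` call this section describes and the
+``haloes`` list loaded above):
+
+.. code-block:: python
+
+  com, A, B, C, e0, tilt = haloes[0].get_ellipsoid_parameters()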
 
 The center of mass would be the same one as returned by the halo
 finder.  The A, B, C are the largest to smallest magnitude of the
@@ -155,12 +155,3 @@
   unigrid simulations, I can take "dens" from the example and divide
   it by the total number of cells to get the average density; in AMR
   one would need to do a volume-weighted average instead.
-
-Thanks
-------
-
-Big thanks to the yt-user and yt-dev community that have been so
-supportive.  Special thanks to Stephen Skory for help in coding some
-functions that I'm not familiar with, Britton Smith's advice to shave
-off redundant data, Matt Turk for encouraging me to even start on
-this trek, and Dave Collins for getting ideas straight in my head.

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/fitting_procedure.rst
--- a/doc/source/analyzing/analysis_modules/fitting_procedure.rst
+++ /dev/null
@@ -1,138 +0,0 @@
-.. _fitting_procedure:
-
-Procedure for Generating Fits
-=============================
-.. sectionauthor:: Hilary Egan <hilary.egan at colorado.edu>
-
-To generate a fit for a spectrum :py:func:`generate_total_fit()` is called.
-This function controls the identification of line complexes, the fit
-of a series of absorption lines for each appropriate species, checks of
-those fits, and returns the results of the fits.
-
-
-Finding Line Complexes
-----------------------
-Line complexes are found using the :py:func:`find_complexes` function. The
-process by which line complexes are found involves walking through
-the array of flux in order from minimum to maximum wavelength, and finding
-series of spatially contiguous cells whose flux is less than some limit.
-These regions are then checked in terms of an additional flux limit and size.
-The bounds of all the passing regions are then listed and returned. Those
-bounds that cover an exceptionally large region of wavelength space will be
-broken up if a suitable cut point is found. This method is only appropriate
-for noiseless spectra.
-
-The optional parameter **complexLim** (default = 0.999), controls the limit
-that triggers the identification of a spatially contiguous region of flux
-that could be a line complex. This number should be very close to 1 but not
-exactly equal. It should also be at least an order of magnitude closer to 1
-than the later discussed **fitLim** parameter, because a line complex where
-the flux of the trough is very close to the flux of the edge can be incredibly
-unstable when optimizing.
-
-The **fitLim** parameter controls what is the maximum flux that the trough
-of the region can have and still be considered a line complex. This 
-effectively controls the sensitivity to very low column absorbers. Default
-value is **fitLim** = 0.99. If a region is identified where the flux of the trough
-is greater than this value, the region is simply ignored.
-
-The **minLength** parameter controls the minimum number of array elements 
-that an identified region must have. This value must be greater than or
-equal to 3 as there are a minimum of 3 free parameters that must be fit.
-Default is **minLength** = 3.
-
-The **maxLength** parameter controls the maximum number of array elements
-that an identified region can have before it is split into separate regions.
-Default is **maxLength** = 1000. This should be adjusted based on the 
-resolution of the spectrum to remain appropriate. The value correspond
-to a wavelength of roughly 50 angstroms. 
-
-The **splitLim** parameter controls how exceptionally large regions are split.
-When such a region is identified by having more array elements than
-**maxLength**, the point of maximum flux (or minimum absorption) in the 
-middle two quartiles is identified. If that point has a flux greater than
-or equal to **splitLim**, then two separate complexes are created: one from
-the lower wavelength edge to the minimum absorption point and the other from
-the minimum absorption point to the higher wavelength edge. The default
-value is **splitLim** =.99, but it should not drastically affect results, so
-long as the value is reasonably close to 1.
-
-
-Fitting a Line Complex
-----------------------
-
-After a complex is identified, it is fitted by iteratively adding and 
-optimizing a set of Voigt Profiles for a particular species until the
-region is considered successfully fit. The optimizing is accomplished
-using scipy's least squares optimizer. This requires an initial estimate
-of the parameters to be fit (column density, b-value, redshift) for each
-line.
-
-Each time a line is added, the guess of the parameters is based on
-the difference between the line complex and the fit so far. For the first line
-this just means the initial guess is based solely on the flux of the line
-complex. The column density is given by the initial column density given
-in the species parameters dictionary. If the line is saturated (some portion
-of the flux with a value less than .1) than the larger initial column density
-guess is chosen. If the flux is relatively high (all values >.9) than the
-smaller initial guess is given. These values are chosen to make optimization
-faster and more stable by being closer to the actual value, but the final
-results of fitting should not depend on them as they merely provide a
-starting point. 
-
-After the parameters for a line are optimized for the first time, the 
-optimized parameters are then used for the initial guess on subsequent 
-iterations with more lines. 
-
-The complex is considered successfully fit when the sum of the squares of 
-the difference between the flux generated from the fit and the desired flux
-profile is less than **errBound**. **errBound** is related to the optional
-parameter to :py:func:`generate_total_fit()`, **maxAvgError** by the number
-of array elements in the region such that **errBound** = number of elements *
-**maxAvgError**.
-
-There are several other conditions under which the cycle of adding and 
-optimizing lines will halt. If the error of the optimized fit from adding
-a line is an order of magnitude worse than the error of the fit without
-that line, then it is assumed that the fitting has become unstable and 
-the latest line is removed. Lines are also prevented from being added if
-the total number of lines is greater than the number of elements in the flux
-array being fit divided by 3. This is because there must not be more free
-parameters in a fit than the number of points to constrain them. 
-
-
-Checking Fit Results
---------------------
-
-After an acceptable fit for a region is determined, there are several steps
-the algorithm must go through to validate the fits. 
-
-First, the parameters must be in a reasonable range. This is a check to make 
-sure that the optimization did not become unstable and generate a fit that
-diverges wildly outside the region where the fit was performed. This way, even
-if particular complex cannot be fit, the rest of the spectrum fitting still
-behaves as expected. The range of acceptability for each parameter is given
-in the species parameter dictionary. These are merely broad limits that will
-prevent numerical instability rather than physical limits.
-
-In cases where a single species generates multiple lines (as in the OVI 
-doublet), the fits are then checked for higher wavelength lines. Originally
-the fits are generated only considering the lowest wavelength fit to a region.
-This is because we perform the fitting of complexes in order from the lowest
-wavelength to the highest, so any contribution to a complex being fit must
-come from the lower wavelength as the higher wavelength contributions would
-already have been subtracted out after fitting the lower wavelength. 
-
-Saturated Lyman Alpha Fitting Tools
------------------------------------
-
-In cases where a large or saturated line (there exists a point in the complex
-where the flux is less than .1) fails to be fit properly at first pass, a
-more robust set of fitting tools is used to try and remedy the situation.
-The basic approach is to simply try a much wider range of initial parameter
-guesses in order to find the true optimization minimum, rather than getting
-stuck in a local minimum. A set of hard coded initial parameter guesses
-for Lyman alpha lines is given by the function :py:func:`get_test_lines`. 
-Also included in these parameter guesses is an an initial guess of a high
-column cool line overlapping a lower column warm line, indictive of a 
-broad Lyman alpha (BLA) absorber.

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -1,3 +1,5 @@
+.. _halo-analysis:
+
 Halo Analysis
 =============
 
@@ -5,10 +7,10 @@
 and using the halo mass function.
 
 .. toctree::
-   :maxdepth: 1
+   :maxdepth: 2
 
+   halo_transition
    halo_catalogs
-   halo_transition
+   halo_finders
    halo_mass_function
    halo_merger_tree
-   halo_analysis_example

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/halo_analysis_example.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis_example.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-.. _halo-analysis-example:
-
-Using HaloCatalogs to do Analysis
----------------------------------
-
-.. notebook:: Halo_Analysis.ipynb

diff -r 2ac4060fa26ad9b322a50dd2f554ec1cf75d0a7c -r 7038c53462646440de30a610a4b58a0c1ea177b1 doc/source/analyzing/analysis_modules/halo_catalogs.rst
--- a/doc/source/analyzing/analysis_modules/halo_catalogs.rst
+++ b/doc/source/analyzing/analysis_modules/halo_catalogs.rst
@@ -1,28 +1,35 @@
 .. _halo_catalog:
 
+Halo Catalogs
+=============
+
 Creating Halo Catalogs
-======================
+----------------------
 
 In yt 3.0, operations relating to the analysis of halos (halo finding,
 merger tree creation, and individual halo analysis) are all brought 
 together into a single framework. This framework is substantially
-different from the limited framework included in yt-2.x and is only 
-backwards compatible in that output from old halo finders may be loaded.
+different from the halo analysis machinery available in yt-2.x and is 
+entirely backward incompatible.  
 For a direct translation of various halo analysis tasks using yt-2.x
 to yt-3.0 please see :ref:`halo-transition`.
 
 A catalog of halos can be created from any initial dataset given to halo 
 catalog through data_ds. These halos can be found using friends-of-friends,
 HOP, and Rockstar. The finder_method keyword dictates which halo finder to
-use. The available arguments are 'fof', 'hop', and'rockstar'. For more
-details on the relative differences between these halo finders see 
+use. The available arguments are :ref:`fof`, :ref:`hop`, and :ref:`rockstar`. 
+For more details on the relative differences between these halo finders see 
 :ref:`halo_finding`.
 
+The class which holds all of the halo information is the 
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+
 .. code-block:: python
 
-   from yt.mods import *
+   import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog
-   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop')
 
 A halo catalog may also be created from already run rockstar outputs. 
@@ -34,7 +41,7 @@
 
 .. code-block:: python
 
-   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
+   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
    hc = HaloCatalog(halos_ds=halos_ds)
 
 Although supplying only the binary output of the rockstar halo finder 
@@ -45,30 +52,31 @@
 
 .. code-block:: python
 
-   halos_ds = load(path+'rockstar_halos/halos_0.0.bin')
-   data_ds = load('Enzo_64/RD0006/RedshiftOutput0006')
+   halos_ds = yt.load(path+'rockstar_halos/halos_0.0.bin')
+   data_ds = yt.load('Enzo_64/RD0006/RedshiftOutput0006')
    hc = HaloCatalog(data_ds=data_ds, halos_ds=halos_ds)
 
-A data container can also be supplied via keyword data_source, 
+A data object can also be supplied via the keyword ``data_source``, 
 associated with either dataset, to control the spatial region in 
 which halo analysis will be performed.
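 
 For example (a sketch; the sphere center and radius are arbitrary):
 
 .. code-block:: python
 
    sp = data_ds.sphere("max", (10, 'Mpc'))
    hc = HaloCatalog(data_ds=data_ds, finder_method='hop', data_source=sp)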
 
 Analysis Using Halo Catalogs
-============================
+----------------------------
 
-Analysis is done by adding actions to the HaloCatalog. Each action is 
-represented by a callback function that will be run on each halo. 
+Analysis is done by adding actions to the 
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog`.
+Each action is represented by a callback function that will be run on each halo. 
 There are three types of actions:
 
-    - Filters
-    - Quantities
-    - Callbacks
+* Filters
+* Quantities
+* Callbacks
 
 All interaction with this analysis can be performed by importing from 
 halo_analysis.
 
 Filters
--------
+^^^^^^^
 
 A filter is a function that returns True or False. If the return value 
 is True, any further queued analysis will proceed and the halo in 
@@ -86,7 +94,7 @@
 added by the user by defining a function that accepts a halo object as 
 the first argument and then adding it as an available filter. If you 
 think that your filter may be of use to the general community, you can 
-add it to yt/analysis_modules/halo_analysis/halo_filters.py and issue a 
+add it to ``yt/analysis_modules/halo_analysis/halo_filters.py`` and issue a 
 pull request.
 
 An example of defining your own filter:
@@ -108,20 +116,27 @@
    hc.add_filter("my_filter")
 
 Quantities
-----------
+^^^^^^^^^^
 
 A quantity is a call back that returns a value or values. The return values 
 are stored within the halo object in a dictionary called “quantities.” At 
 the end of the analysis, all of these quantities will be written to disk as 
-the final form of the generated “halo catalog.”
+the final form of the generated halo catalog.
 
 Quantities may be available in the initial fields found in the halo catalog, 
 or calculated from a function after supplying a definition. An example 
 definition of center of mass is shown below. Currently available quantities 
 are center_of_mass and bulk_velocity. Their definitions are available in 
-yt/analysis_modules/halo_analysis/halo_quantities.py . If you think that 
+``yt/analysis_modules/halo_analysis/halo_quantities.py``. If you think that 
 your quantity may be of use to the general community, add it to 
-halo_quantities.py and issue a pull request.
+``halo_quantities.py`` and issue a pull request.  Default halo quantities are:
+
+* ``particle_identifier`` -- Halo ID (e.g. 0 to N)
+* ``particle_mass`` -- Mass of halo
+* ``particle_position_x`` -- Location of halo
+* ``particle_position_y`` -- Location of halo
+* ``particle_position_z`` -- Location of halo
+* ``virial_radius`` -- Virial radius of halo
 
 An example of adding a quantity:
 
@@ -147,7 +162,7 @@
    hc.add_quantity("my_quantity") 
 
 Callbacks
----------
+^^^^^^^^^
 
 A callback is actually the super class for quantities and filters and 
 is a general purpose function that does something, anything, to a Halo 
@@ -156,14 +171,14 @@
 anything.
 
 An example of using a pre-defined callback where we create a sphere for 
-each halo with a radius that is twice the saved “radius”.
+each halo with a radius that is twice the saved ``radius``.
 
 .. code-block:: python
 
    hc.add_callback("sphere", factor=2.0)
     
 Currently available callbacks are located in 
-yt/analysis_modules/halo_analysis/halo_callbacks.py. New callbacks may 
+``yt/analysis_modules/halo_analysis/halo_callbacks.py``. New callbacks may 
 be added by using the syntax shown below. If you think that your 
 callback may be of use to the general community, add it to 
 ``halo_callbacks.py`` and issue a pull request.
@@ -185,7 +200,7 @@
    hc.add_callback("my_callback")
 
 Running Analysis
-================
+----------------
 
 After all callbacks, quantities, and filters have been added, the 
 analysis begins with a call to HaloCatalog.create.
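 
 For example, using the ``hc`` object constructed above:
 
 .. code-block:: python
 
    hc.create()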
@@ -199,10 +214,12 @@
 contents of their quantities dicts will be retained for creating the 
 final catalog. The looping over halos uses a call to parallel_objects 
 allowing the user to control how many processors work on each halo. 
-The final catalog is written to disk int the output directory given 
-when the HaloCatalog object was created.
+The final catalog is written to disk in the output directory given 
+when the 
+:class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+object was created.
 
-All callbacks, quantities, and filters are stored in an “actions” list, 
+All callbacks, quantities, and filters are stored in an actions list, 
 meaning that they are executed in the same order in which they were added. 
 This enables the use of simple, reusable, single action callbacks that 
 depend on each other. This also prevents unnecessary computation by allowing 
@@ -210,23 +227,25 @@
 is not warranted.
 
 Saving and Reloading Halo Catalogs
-==================================
+----------------------------------
 
-A HaloCatalog saved to disk can be reloaded as yt dataset with the 
+A :class:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog` 
+saved to disk can be reloaded as a yt dataset with the 
 standard call to load. Any side data, such as profiles, can be reloaded 
-with a load_profiles callback and a call to HaloCatalog.load.
+with a ``load_profiles`` callback and a call to 
+:func:`~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog.load`.
 
 .. code-block:: python
 
-   hds = load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
+   hds = yt.load(path+"halo_catalogs/catalog_0046/catalog_0046.0.h5")
    hc = HaloCatalog(halos_ds=hds,
                     output_dir="halo_catalogs/catalog_0046")
    hc.add_callback("load_profiles", output_dir="profiles",
                    filename="virial_profiles")
    hc.load()
 
-Summary
-=======
+Worked Example of Halo Catalog in Action
+----------------------------------------
 
 For a full example of how to use these methods together see 
-:doc:`halo_analysis_example`.
+:ref:`halo-analysis-example`.

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/da689254a3be/
Changeset:   da689254a3be
Branch:      yt
User:        ngoldbaum
Date:        2014-09-30 19:26:09+00:00
Summary:     Merging the yt-3.0 branch into the yt branch
Affected #:  11 files

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/api.py
--- /dev/null
+++ b/yt/frontends/charm/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.charm
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      CharmGrid, \
+      CharmHierarchy, \
+      CharmStaticOutput
+
+from .fields import \
+      CharmFieldInfo, \
+      add_charm_field
+
+from .io import \
+      IOHandlerCharmHDF5

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/data_structures.py
--- /dev/null
+++ b/yt/frontends/charm/data_structures.py
@@ -0,0 +1,341 @@
+"""
+Data structures for Charm.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import re
+import os
+import weakref
+import numpy as np
+
+from collections import \
+     defaultdict
+from string import \
+     strip, \
+     rstrip
+from stat import \
+     ST_CTIME
+
+from .definitions import \
+     charm2enzoDict, \
+     yt2charmFieldsDict, \
+     parameterDict \
+
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+     AMRGridPatch
+from yt.data_objects.hierarchy import \
+     AMRHierarchy
+from yt.data_objects.static_output import \
+     StaticOutput
+from yt.utilities.definitions import \
+     mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+     parallel_root_only
+from yt.utilities.io_handler import \
+    io_registry
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+from .fields import \
+    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
+    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
+    KnownCharmFields
+
+class CharmGrid(AMRGridPatch):
+    _id_offset = 0
+    __slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, start, stop):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.ActiveDimensions = stop - start + 1
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        if self.start_index != None:
+            return self.start_index
+        if self.Parent == []:
+            iLE = self.LeftEdge - self.pf.domain_left_edge
+            start_index = iLE / self.dds
+            return np.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        self.dds = self.hierarchy.dds_list[self.Level]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+class CharmHierarchy(AMRHierarchy):
+
+    grid = CharmGrid
+    _data_file = None
+
+    def __init__(self,pf,data_style='charm_hdf5'):
+        self.domain_left_edge = pf.domain_left_edge
+        self.domain_right_edge = pf.domain_right_edge
+        self.data_style = data_style
+
+        if pf.dimensionality == 1:
+            self.data_style = "charm1d_hdf5"
+        if pf.dimensionality == 2:
+            self.data_style = "charm2d_hdf5"
+
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = os.path.abspath(
+            self.parameter_file.parameter_filename)
+        self.directory = pf.fullpath
+        self._handle = pf._handle
+
+        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
+        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
+        AMRHierarchy.__init__(self,pf,data_style)
+        self._read_particles()
+
+    def _read_particles(self):
+        
+        self.num_particles = 0
+        particles_per_grid = []
+        for key, val in self._handle.items():
+            if key.startswith('level'):
+                level_particles = val['particles:offsets'][:]
+                self.num_particles += level_particles.sum()
+                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
+
+        for i, grid in enumerate(self.grids):
+            self.grids[i].NumberOfParticles = particles_per_grid[i]
+            self.grid_particle_count[i] = particles_per_grid[i]
+
+        assert(self.num_particles == self.grid_particle_count.sum())
+
+    def _detect_fields(self):
+        self.field_list = []
+        for key, val in self._handle.attrs.items():
+            if key.startswith("component"):
+                self.field_list.append(val)
+          
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 0
+        for lev in self._levels:
+            self.num_grids += self._handle[lev]['Processors'].len()
+
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+
+        grids = []
+        self.dds_list = []
+        i = 0
+        D = self.parameter_file.dimensionality
+        for lev_index, lev in enumerate(self._levels):
+            level_number = int(re.match('level_(\d+)',lev).groups()[0])
+            try:
+                boxes = f[lev]['boxes'].value
+            except KeyError:
+                boxes = f[lev]['particles:boxes'].value
+            dx = f[lev].attrs['dx']
+            self.dds_list.append(dx * np.ones(3))
+
+            if D == 1:
+                self.dds_list[lev_index][1] = 1.0
+                self.dds_list[lev_index][2] = 1.0
+
+            if D == 2:
+                self.dds_list[lev_index][2] = 1.0
+
+            for level_id, box in enumerate(boxes):
+                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
+                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
+                
+                if D == 1:
+                    si = np.concatenate((si, [0.0, 0.0]))
+                    ei = np.concatenate((ei, [0.0, 0.0]))
+
+                if D == 2:
+                    si = np.concatenate((si, [0.0]))
+                    ei = np.concatenate((ei, [0.0]))
+
+                pg = self.grid(len(grids),self,level=level_number,
+                               start = si, stop = ei)
+                grids.append(pg)
+                grids[-1]._level_id = level_id
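+                # grid edges follow from the integer box indices scaled by
+                # this level's cell spacing (domain assumed to start at 0)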
+                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
+                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
+                self.grid_particle_count[i] = 0
+                self.grid_dimensions[i] = ei - si + 1
+                i += 1
+        self.grids = np.empty(len(grids), dtype='object')
+        for gi, g in enumerate(grids): self.grids[gi] = g
+
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+            g._setup_dx()
+
+        for g in self.grids:
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class CharmStaticOutput(StaticOutput):
+    _hierarchy_class = CharmHierarchy
+    _fieldinfo_fallback = CharmFieldInfo
+    _fieldinfo_known = KnownCharmFields
+
+    def __init__(self, filename, data_style='charm_hdf5',
+                 storage_filename = None, ini_filename = None):
+        self._handle = h5py.File(filename,'r')
+        self.current_time = self._handle['level_0'].attrs['time']
+        self.ini_filename = ini_filename
+        self.fullplotdir = os.path.abspath(filename)
+        StaticOutput.__init__(self,filename,data_style)
+        self.storage_filename = storage_filename
+        self.cosmological_simulation = False
+
+        # These are parameters that I very much wish to get rid of.
+        self.parameters["HydroMethod"] = 'charm' # always PPM DE
+        self.parameters["DualEnergyFormalism"] = 0 
+        self.parameters["EOSType"] = -1 # default
+
+    def __del__(self):
+        self._handle.close()
+
+    def _set_units(self):
+        """
+        Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        seconds = 1 #self["Time"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = seconds / sec_conversion[unit]
+        for key in yt2charmFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    def _parse_parameter_file(self):
+        
+        self.unique_identifier = \
+                               int(os.stat(self.parameter_filename)[ST_CTIME])
+        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
+        self.domain_left_edge = self.__calc_left_edge()
+        self.domain_right_edge = self.__calc_right_edge()
+        self.domain_dimensions = self.__calc_domain_dimensions()
+
+        if self.dimensionality == 1:
+            self._fieldinfo_fallback = Charm1DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
+
+        if self.dimensionality == 2:
+            self._fieldinfo_fallback = Charm2DFieldInfo
+            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
+            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
+            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
+        
+        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
+        self.periodicity = (True,) * self.dimensionality
+
+    def __calc_left_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        return LE
+
+    def __calc_right_edge(self):
+        fileh = self._handle
+        dx0 = fileh['/level_0'].attrs['dx']
+        D = self.dimensionality
+        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return RE
+
+    def __calc_domain_dimensions(self):
+        fileh = self._handle
+        D = self.dimensionality
+        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
+        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
+        return R_index - L_index
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            valid = "Charm_global" in fileh["/"]
+            fileh.close()
+            return valid
+        except:
+            pass
+        return False
+
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/definitions.py
--- /dev/null
+++ b/yt/frontends/charm/definitions.py
@@ -0,0 +1,54 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                                 }
+
+charm2enzoDict = {"GAMMA": "Gamma",
+                  "Ref_ratio": "RefineBy"
+                                    }
+
+yt2charmFieldsDict = {}
+charm2ytFieldsDict = {}
+

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/fields.py
--- /dev/null
+++ b/yt/frontends/charm/fields.py
@@ -0,0 +1,150 @@
+"""
+Charm-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    NullFunc, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+import numpy as np
+
+CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = CharmFieldInfo.add_field
+
+KnownCharmFields = FieldInfoContainer()
+add_charm_field = KnownCharmFields.add_field
+
+add_charm_field("potential", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("potential")],
+                units=r"")
+
+add_charm_field("density", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("density")],
+                units=r"")
+
+add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_x")],
+                units=r"")
+
+add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_y")],
+                units=r"")
+
+add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("gravitational_field_z")],
+                units=r"")
+
+def _Density(field, data):
+    return data["density"]
+add_field("Density",function=_Density, take_log=True,
+          units=r'\rm{g}/\rm{cm^3}')
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+        
+    return _Particles
+
+_particle_field_list = ["mass",
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "velocity_x",
+                        "velocity_y",
+                        "velocity_z",
+                        "acceleration_x",
+                        "acceleration_y",
+                        "acceleration_z"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)
+
+#do overrides for 2D
+
+Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_2d_field = Charm2DFieldInfo.add_field
+
+def _gravitational_field_z(field, data):
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
+
+def _particle_position_z(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+add_charm_2d_field("particle_position_z", function=_particle_position_z)
+
+def _particle_velocity_z(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
+
+def _particle_acceleration_z(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+#do overrides for 1D
+
+Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
+add_charm_1d_field = Charm1DFieldInfo.add_field
+
+def _gravitational_field_y(field, data):
+    # the y-component is the field being defined here, so size the
+    # zero-filled array from the x-component instead
+    return np.zeros(data['gravitational_field_x'].shape,
+                    dtype='float64')
+
+def _particle_position_y(field, data):
+    return np.zeros(data['particle_position_x'].shape, dtype='float64')
+
+def _particle_velocity_y(field, data):
+    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
+
+def _particle_acceleration_y(field, data):
+    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
+
+add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
+add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
+
+add_charm_1d_field("particle_position_z", function=_particle_position_z)
+add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
+add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
+
+add_charm_1d_field("particle_position_y", function=_particle_position_y)
+add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
+add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/io.py
--- /dev/null
+++ b/yt/frontends/charm/io.py
@@ -0,0 +1,127 @@
+"""
+The data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+import h5py
+import os
+import re
+import numpy as np
+
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerCharmHDF5(BaseIOHandler):
+    _data_style = "charm_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'position_z': 2,
+                                      'velocity_x': 3,
+                                      'velocity_y': 4,
+                                      'velocity_z': 5,
+                                      'acceleration_x': 6,
+                                      'acceleration_y': 7,
+                                      'acceleration_z': 8,
+                                      'mass': 9}
+
+    _field_dict = None
+    @property
+    def field_dict(self):
+        if self._field_dict is not None:
+            return self._field_dict
+        field_dict = {}
+        for key, val in self._handle.attrs.items():
+            if key.startswith('component_'):
+                comp_number = int(re.match('component_(\d)', key).groups()[0])
+                field_dict[val] = comp_number
+        self._field_dict = field_dict
+        return self._field_dict
+        
+    def _read_field_names(self, grid):
+        ncomp = int(self._handle['/'].attrs['num_components'])
+        # component names are stored in the file-level attributes
+        fns = [c[1] for c in self._handle['/'].attrs.items()[-ncomp-1:-1]]
+        return fns
+    
+    def _read_data(self,grid,field):
+
+        lstring = 'level_%i' % grid.Level
+        lev = self._handle[lstring]
+        dims = grid.ActiveDimensions
+        boxsize = dims.prod()
+        
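+        # each grid's components are stored contiguously: offset to this
+        # grid, then skip ``boxsize`` elements per component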
+        grid_offset = lev[self._offset_string][grid._level_id]
+        start = grid_offset+self.field_dict[field]*boxsize
+        stop = start + boxsize
+        data = lev[self._data_string][start:stop]
+        
+        return data.reshape(dims, order='F')
+
+    def _read_particles(self, grid, name):
+
+        field_index = self._particle_field_index[name]
+        lev = 'level_%s' % grid.Level
+
+        particles_per_grid = self._handle[lev]['particles:offsets'].value
+        items_per_particle = len(self._particle_field_index)
+
+        # compute global offset position
+        offsets = items_per_particle * np.cumsum(particles_per_grid)
+        offsets = np.append(np.array([0]), offsets)
+        offsets = np.array(offsets, dtype=np.int64)
+
+        # convert between the global grid id and the id on this level            
+        grid_levels = np.array([g.Level for g in self.pf.h.grids])
+        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
+        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
+        lo = grid.id - grid_level_offset
+        hi = lo + 1
+
+        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
+        return data[field_index::items_per_particle]
+
+class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm2d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'position_y': 1,
+                                      'velocity_x': 2,
+                                      'velocity_y': 3,
+                                      'acceleration_x': 4,
+                                      'acceleration_y': 5,
+                                      'mass': 6}
+
+
+class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
+    _data_style = "charm1d_hdf5"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+        self._particle_field_index = {'position_x': 0,
+                                      'velocity_x': 1,
+                                      'acceleration_x': 2,
+                                      'mass': 3}
\ No newline at end of file

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/charm/setup.py
--- /dev/null
+++ b/yt/frontends/charm/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('charm', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -41,7 +41,11 @@
             self.ghost = np.array(self.ghost)
         except KeyError:
             # assume zero ghosts if outputGhosts not present
+<<<<<<< local
             self.ghost = np.zeros(self.dim)
+=======
+            self.ghost = np.array(self.dim)
+>>>>>>> other
 
     _field_dict = None
     @property
@@ -79,7 +83,11 @@
         dims = grid.ActiveDimensions
         shape = grid.ActiveDimensions + 2*self.ghost
         boxsize = shape.prod()
+<<<<<<< local
 
+=======
+        
+>>>>>>> other
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
@@ -179,25 +187,46 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
+<<<<<<< local
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
+=======
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+>>>>>>> other
         self.dim = 2
         self._read_ghost_info()
+<<<<<<< local
 
+=======
+>>>>>>> other
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
+<<<<<<< local
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
+=======
+    def __init__(self, pf, *args, **kwargs):
+        BaseIOHandler.__init__(self, pf, *args, **kwargs)
+        self.pf = pf
+>>>>>>> other
         self.dim = 1
+<<<<<<< local
         self._handle = ds._handle
+=======
+        self._handle = pf._handle   
+>>>>>>> other
         self._read_ghost_info()
+<<<<<<< local
 
 
 class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
@@ -268,6 +297,8 @@
 
     return index
 
+=======
+>>>>>>> other
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -49,7 +49,7 @@
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"
-@requires_ds(zp)
+@requires_pf(zp)
 def test_zp():
     ds = data_dir_load(zp)
     yield assert_equal, str(ds), "plt32.2d.hdf5"

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -26,6 +26,8 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
+    config.add_subpackage("pluto")
+    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")

diff -r f0faeac1992516e8c8ddf806da5e73e7b464e5eb -r da689254a3be1762f3211a674731fb8f73b50bdc yt/visualization/particle_plotter.py
--- /dev/null
+++ b/yt/visualization/particle_plotter.py
@@ -0,0 +1,496 @@
+"""
+This is a simple mechanism for interfacing with Particle plots
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+
+import __builtin__
+import base64
+import os
+import types
+
+from functools import wraps
+from itertools import izip
+import matplotlib
+import numpy as np
+import cStringIO
+
+from .base_plot_types import ImagePlotMPL
+from .plot_container import \
+    ImagePlotContainer, \
+    log_transform, linear_transform
+from yt.data_objects.profiles import \
+    create_profile
+from yt.utilities.exceptions import \
+    YTNotInsideNotebook
+from yt.utilities.logger import ytLogger as mylog
+import _mpl_imports as mpl
+from yt.funcs import \
+    ensure_list, \
+    get_image_suffix, \
+    get_ipython_api_version
+from yt.units.unit_object import Unit
+
+def get_canvas(name):
+    suffix = get_image_suffix(name)
+    
+    if suffix == '':
+        suffix = '.png'
+    if suffix == ".png":
+        canvas_cls = mpl.FigureCanvasAgg
+    elif suffix == ".pdf":
+        canvas_cls = mpl.FigureCanvasPdf
+    elif suffix in (".eps", ".ps"):
+        canvas_cls = mpl.FigureCanvasPS
+    else:
+        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
+        canvas_cls = mpl.FigureCanvasAgg
+    return canvas_cls
+
+def invalidate_plot(f):
+    @wraps(f)
+    def newfunc(*args, **kwargs):
+        rv = f(*args, **kwargs)
+        args[0]._plot_valid = False
+        args[0]._setup_plots()
+        return rv
+    return newfunc
+
+class FigureContainer(dict):
+    def __init__(self):
+        super(FigureContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = mpl.matplotlib.figure.Figure((10, 8))
+        self[key] = figure
+        return self[key]
+
+class AxesContainer(dict):
+    def __init__(self, fig_container):
+        self.fig_container = fig_container
+        self.ylim = {}
+        super(AxesContainer, self).__init__()
+
+    def __missing__(self, key):
+        figure = self.fig_container[key]
+        self[key] = figure.add_subplot(111)
+        return self[key]
+
+    def __setitem__(self, key, value):
+        super(AxesContainer, self).__setitem__(key, value)
+        self.ylim[key] = (None, None)
+
+def sanitize_label(label, nprofiles):
+    label = ensure_list(label)
+    
+    if len(label) == 1:
+        label = label * nprofiles
+    
+    if len(label) != nprofiles:
+        raise RuntimeError("Number of labels must match number of profiles")
+
+    for l in label:
+        if l is not None and not isinstance(l, basestring):
+            raise RuntimeError("All labels must be None or a string")
+
+    return label
+
+class ParticlePlot(object):
+    r"""
+    Create a particle scatter plot from a data source.
+
+    Given a data object (all_data, region, sphere, etc.), an x field,
+    and a y field, this will make a scatter plot with one marker
+    for each particle.
+
+    Parameters
+    ----------
+    data_source : AMR3DData Object
+        The data object containing the particles, such as all_data,
+        region, or sphere.
+    x_field : str
+        The field to plot on the x-axis.
+    y_field : str
+        The field to plot on the y-axis.
+    plot_spec : dict or list of dicts
+        A dictionary or list of dictionaries containing plot keyword 
+        arguments.  For example, dict(color="blue", marker=".").
+        Default: None.
+
+    Examples
+    --------
+
+    >>> import yt
+    >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+    >>> ad = ds.all_data()
+    >>> plot = ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x',
+    ...                     plot_spec=dict(c='red', marker='.',
+    ...                                    linestyle='None'))
+    >>> plot.save()
+
+    Use set_line_property to change the properties of the plotted markers.
+
+    """
+    x_log = None
+    y_log = None
+    z_log = None
+    x_title = None
+    y_title = None
+    _plot_valid = False
+
+    def __init__(self, data_source, x_field, y_field,
+                 plot_spec=None):
+
+        if plot_spec is None:
+            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
+
+        self.data_source = data_source
+        self.x_field = x_field
+        self.y_field = y_field
+        self.plot_spec = plot_spec
+
+        self.x_data = self.data_source[x_field]
+        self.y_data = self.data_source[y_field]
+        
+        self.figure = mpl.matplotlib.figure.Figure((10, 8))
+        self.axis = self.figure.add_subplot(111)
+        self._setup_plots()
+
+    def save(self, name=None):
+        r"""
+        Saves the scatter plot to disk.
+
+        Parameters
+        ----------
+        name : str
+            The output file keyword.
+
+        """
+        if not self._plot_valid:
+            self._setup_plots()
+        if name is None:
+            name = "%s.png" % self.data_source.ds
+        suffix = get_image_suffix(name)
+        if suffix == '':
+            name += '.png'
+        canvas = get_canvas(name)(self.figure)
+        mylog.info("Saving %s", name)
+        canvas.print_figure(name)
+        return name
+
+    def show(self):
+        r"""This will send any existing plots to the IPython notebook.
+        function name.
+
+        If yt is being run from within an IPython session, and it is able to
+        determine this, this function will send any existing plots to the
+        notebook for display.
+
+        If yt can't determine if it's inside an IPython session, it will raise
+        YTNotInsideNotebook.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x',
+        ...                   'particle_velocity_x')
+        >>> pp.show()
+
+        """
+        if "__IPYTHON__" in dir(__builtin__):
+            api_version = get_ipython_api_version()
+            if api_version in ('0.10', '0.11'):
+                self._send_zmq()
+            else:
+                from IPython.display import display
+                display(self)
+        else:
+            raise YTNotInsideNotebook
+
+    def _repr_html_(self):
+        """Return an html representation of the plot object. Will display as a
+        png for each WindowPlotMPL instance in self.plots"""
+        ret = ''
+        canvas = mpl.FigureCanvasAgg(self.figure)
+        f = cStringIO.StringIO()
+        canvas.print_figure(f)
+        f.seek(0)
+        img = base64.b64encode(f.read())
+        ret += '<img src="data:image/png;base64,%s"><br>' % img
+        return ret
+
+    def _setup_plots(self):
+        # clear the axes so invalidation does not stack duplicate lines
+        self.axis.cla()
+        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
+                       **self.plot_spec)
+
+        xscale = self._get_field_log(self.x_field)
+        yscale = self._get_field_log(self.y_field)
+
+        xtitle = self._get_field_title(self.x_field)
+        ytitle = self._get_field_title(self.y_field)
+
+        self.axis.set_xscale(xscale)
+        self.axis.set_yscale(yscale)
+
+        self.axis.set_xlabel(xtitle)
+        self.axis.set_ylabel(ytitle)
+
+        self._plot_valid = True
+
+    @invalidate_plot
+    def set_line_property(self, property, value, index=None):
+        r"""
+        Set properties for one or all lines to be plotted.
+
+        Parameters
+        ----------
+        property : str
+            The line property to be set.
+        value : str, int, float
+            The value to set for the line property.
+        index : int
+            The index of the plot spec to change when a list of specs
+            is in use.  If None, change all plotted lines.
+            Default : None.
+
+        Examples
+        --------
+
+        Change all the lines in a plot
+        plot.set_line_property("linestyle", "-")
+
+        Change a single line.
+        plot.set_line_property("linewidth", 4, index=0)
+        
+        """
+        if index is None:
+            specs = ensure_list(self.plot_spec)
+        else:
+            specs = [self.plot_spec[index]]
+        for spec in specs:
+            spec[property] = value
+        return self
+
+    @invalidate_plot
+    def set_log(self, field, log):
+        """set a field to log or linear.
+
+        Parameters
+        ----------
+        field : string
+            the field to set a transform
+        log : boolean
+            Log on/off.
+        """
+        if field == "all":
+            self.x_log = log
+            for field in self.profiles[0].field_data.keys():
+                self.y_log[field] = log
+        else:
+            field, = self.profiles[0].data_source._determine_fields([field])
+            if field == self.profiles[0].x_field:
+                self.x_log = log
+            elif field in self.profiles[0].field_data:
+                self.y_log[field] = log
+            else:
+                raise KeyError("Field %s not in profile plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_unit(self, field, unit):
+        """Sets a new unit for the requested field
+
+        Parameters
+        ----------
+        field : string
+           The name of the field that is to be changed.
+
+        unit : string or Unit object
+           The name of the new unit.
+        """
+        if field == self.x_field:
+            self.x_data = self.x_data.in_units(unit)
+        elif field == self.y_field:
+            self.y_data = self.y_data.in_units(unit)
+        else:
+            raise KeyError("Field %s not in particle plot!" % (field))
+        return self
+
+    @invalidate_plot
+    def set_xlim(self, xmin=None, xmax=None):
+        """Sets the limits of the bin field
+
+        Parameters
+        ----------
+        
+        xmin : float or None
+          The new x minimum.  Defaults to None, which leaves the xmin
+          unchanged.
+
+        xmax : float or None
+          The new x maximum.  Defaults to None, which leaves the xmax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x',
+        ...                   'particle_velocity_x')
+        >>> pp.set_xlim(0.2, 0.8)
+        >>> pp.save()
+
+        """
+        self.axis.set_xlim(xmin, xmax)
+        return self
+
+    @invalidate_plot
+    def set_ylim(self, field, ymin=None, ymax=None):
+        """Sets the plot limits for the specified field we are binning.
+
+        Parameters
+        ----------
+
+        field : string or field tuple
+            The field whose plot limits should be adjusted.
+
+        ymin : float or None
+          The new y minimum.  Defaults to None, which leaves the ymin
+          unchanged.
+
+        ymax : float or None
+          The new y maximum.  Defaults to None, which leaves the ymax
+          unchanged.
+
+        Examples
+        --------
+
+        >>> import yt
+        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
+        >>> pp = ParticlePlot(ds.all_data(), 'particle_position_x',
+        ...                   'particle_velocity_x')
+        >>> pp.set_ylim('particle_velocity_x', -1e7, 1e7)
+        >>> pp.save()
+
+        """
+        if field not in ('all', self.y_field):
+            raise KeyError("Field %s not in particle plot!" % (field))
+        self.axis.set_ylim(ymin, ymax)
+        return self
+
+    def _get_field_log(self, field):
+        # honor explicit set_log overrides, then fall back to field info
+        if field == self.x_field and self.x_log is not None:
+            do_log = self.x_log
+        elif field == self.y_field and self.y_log is not None:
+            do_log = self.y_log
+        else:
+            ds = self.data_source.ds
+            f, = self.data_source._determine_fields([field])
+            fi = ds._get_field_info(*f)
+            do_log = fi.take_log
+        scales = {True: 'log', False: 'linear'}
+        return scales[do_log]
+
+    def _get_field_label(self, field, field_info, field_unit, fractional=False):
+        field_unit = field_unit.latex_representation()
+        field_name = field_info.display_name
+        if isinstance(field, tuple): field = field[1]
+        if field_name is None:
+            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
+        elif field_name.find('$') == -1:
+            field_name = field_name.replace(' ','\/')
+            field_name = r'$\rm{'+field_name+r'}$'
+        if fractional:
+            label = field_name + r'$\rm{\/Probability\/Density}$'
+        elif field_unit is None or field_unit == '' or field_unit == '1':
+            label = field_name
+        else:
+            label = field_name+r'$\/\/('+field_unit+r')$'
+        return label
+
+    def _get_field_title(self, field):
+        ds = self.data_source.ds
+        f, = self.data_source._determine_fields([field])
+        fi = ds._get_field_info(*f)
+        field_unit = Unit(fi.units, registry=self.data_source.ds.unit_registry)
+        title = self._get_field_label(field, fi, field_unit)
+        return title
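Taken together, a minimal usage sketch for the class added above, assuming a yt-3.0 install and a dataset that carries particle fields (the dataset path is illustrative):

import yt
from yt.visualization.particle_plotter import ParticlePlot

ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")  # illustrative path
ad = ds.all_data()
# one marker per particle; plot_spec is passed straight through to matplotlib
plot = ParticlePlot(ad, 'particle_position_x', 'particle_velocity_x',
                    plot_spec=dict(c='b', marker='.', linestyle='None'))
plot.set_xlim(0.2, 0.8)
fn = plot.save('particle_scatter.png')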


https://bitbucket.org/yt_analysis/yt/commits/d0e473b58537/
Changeset:   d0e473b58537
Branch:      yt
User:        ngoldbaum
Date:        2014-09-30 19:29:52+00:00
Summary:     Removing code that should not make it into the yt branch
Affected #:  11 files

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/api.py
--- a/yt/frontends/charm/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.charm
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CharmGrid, \
-      CharmHierarchy, \
-      CharmStaticOutput
-
-from .fields import \
-      CharmFieldInfo, \
-      add_charm_field
-
-from .io import \
-      IOHandlerCharmHDF5

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/data_structures.py
--- a/yt/frontends/charm/data_structures.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""
-Data structures for Charm.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import re
-import os
-import weakref
-import numpy as np
-
-from collections import \
-     defaultdict
-from string import \
-     strip, \
-     rstrip
-from stat import \
-     ST_CTIME
-
-from .definitions import \
-     charm2enzoDict, \
-     yt2charmFieldsDict, \
-     parameterDict \
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-     AMRGridPatch
-from yt.data_objects.hierarchy import \
-     AMRHierarchy
-from yt.data_objects.static_output import \
-     StaticOutput
-from yt.utilities.definitions import \
-     mpc_conversion, sec_conversion
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-     parallel_root_only
-from yt.utilities.io_handler import \
-    io_registry
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import \
-    CharmFieldInfo, Charm2DFieldInfo, Charm1DFieldInfo, \
-    add_charm_field, add_charm_2d_field, add_charm_1d_field, \
-    KnownCharmFields
-
-class CharmGrid(AMRGridPatch):
-    _id_offset = 0
-    __slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, hierarchy, level, start, stop):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                              hierarchy = hierarchy)
-        self.Parent = []
-        self.Children = []
-        self.Level = level
-        self.ActiveDimensions = stop - start + 1
-
-    def get_global_startindex(self):
-        """
-        Return the integer starting index for each dimension at the current
-        level.
-
-        """
-        if self.start_index != None:
-            return self.start_index
-        if self.Parent == []:
-            iLE = self.LeftEdge - self.pf.domain_left_edge
-            start_index = iLE / self.dds
-            return np.rint(start_index).astype('int64').ravel()
-        pdx = self.Parent[0].dds
-        start_index = (self.Parent[0].get_global_startindex()) + \
-            np.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
-        self.start_index = (start_index*self.pf.refine_by).astype('int64').ravel()
-        return self.start_index
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        self.dds = self.hierarchy.dds_list[self.Level]
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-class CharmHierarchy(AMRHierarchy):
-
-    grid = CharmGrid
-    _data_file = None
-
-    def __init__(self,pf,data_style='charm_hdf5'):
-        self.domain_left_edge = pf.domain_left_edge
-        self.domain_right_edge = pf.domain_right_edge
-        self.data_style = data_style
-
-        if pf.dimensionality == 1:
-            self.data_style = "charm1d_hdf5"
-        if pf.dimensionality == 2:
-            self.data_style = "charm2d_hdf5"
-
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        # for now, the hierarchy file is the parameter file!
-        self.hierarchy_filename = os.path.abspath(
-            self.parameter_file.parameter_filename)
-        self.directory = pf.fullpath
-        self._handle = pf._handle
-
-        self.float_type = self._handle['Chombo_global'].attrs['testReal'].dtype.name
-        self._levels = [key for key in self._handle.keys() if key.startswith('level')]
-        AMRHierarchy.__init__(self,pf,data_style)
-        self._read_particles()
-
-    def _read_particles(self):
-        
-        self.num_particles = 0
-        particles_per_grid = []
-        for key, val in self._handle.items():
-            if key.startswith('level'):
-                level_particles = val['particles:offsets'][:]
-                self.num_particles += level_particles.sum()
-                particles_per_grid = np.concatenate((particles_per_grid, level_particles))
-
-        for i, grid in enumerate(self.grids):
-            self.grids[i].NumberOfParticles = particles_per_grid[i]
-            self.grid_particle_count[i] = particles_per_grid[i]
-
-        assert(self.num_particles == self.grid_particle_count.sum())
-
-    def _detect_fields(self):
-        self.field_list = []
-        for key, val in self._handle.attrs.items():
-            if key.startswith("component"):
-                self.field_list.append(val)
-          
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _count_grids(self):
-        self.num_grids = 0
-        for lev in self._levels:
-            self.num_grids += self._handle[lev]['Processors'].len()
-
-    def _parse_hierarchy(self):
-        f = self._handle # shortcut
-
-        grids = []
-        self.dds_list = []
-        i = 0
-        D = self.parameter_file.dimensionality
-        for lev_index, lev in enumerate(self._levels):
-            level_number = int(re.match('level_(\d+)',lev).groups()[0])
-            try:
-                boxes = f[lev]['boxes'].value
-            except KeyError:
-                boxes = f[lev]['particles:boxes'].value
-            dx = f[lev].attrs['dx']
-            self.dds_list.append(dx * np.ones(3))
-
-            if D == 1:
-                self.dds_list[lev_index][1] = 1.0
-                self.dds_list[lev_index][2] = 1.0
-
-            if D == 2:
-                self.dds_list[lev_index][2] = 1.0
-
-            for level_id, box in enumerate(boxes):
-                si = np.array([box['lo_%s' % ax] for ax in 'ijk'[:D]])
-                ei = np.array([box['hi_%s' % ax] for ax in 'ijk'[:D]])
-                
-                if D == 1:
-                    si = np.concatenate((si, [0.0, 0.0]))
-                    ei = np.concatenate((ei, [0.0, 0.0]))
-
-                if D == 2:
-                    si = np.concatenate((si, [0.0]))
-                    ei = np.concatenate((ei, [0.0]))
-
-                pg = self.grid(len(grids),self,level=level_number,
-                               start = si, stop = ei)
-                grids.append(pg)
-                grids[-1]._level_id = level_id
-                self.grid_left_edge[i] = self.dds_list[lev_index]*si.astype(self.float_type)
-                self.grid_right_edge[i] = self.dds_list[lev_index]*(ei.astype(self.float_type)+1)
-                self.grid_particle_count[i] = 0
-                self.grid_dimensions[i] = ei - si + 1
-                i += 1
-        self.grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self.grids[gi] = g
-
-    def _populate_grid_objects(self):
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-
-        for g in self.grids:
-            g.Children = self._get_grid_children(g)
-            for g1 in g.Children:
-                g1.Parent.append(g)
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
-    def _setup_data_io(self):
-        self.io = io_registry[self.data_style](self.parameter_file)
-
-class CharmStaticOutput(StaticOutput):
-    _hierarchy_class = CharmHierarchy
-    _fieldinfo_fallback = CharmFieldInfo
-    _fieldinfo_known = KnownCharmFields
-
-    def __init__(self, filename, data_style='charm_hdf5',
-                 storage_filename = None, ini_filename = None):
-        self._handle = h5py.File(filename,'r')
-        self.current_time = self._handle['level_0'].attrs['time']
-        self.ini_filename = ini_filename
-        self.fullplotdir = os.path.abspath(filename)
-        StaticOutput.__init__(self,filename,data_style)
-        self.storage_filename = storage_filename
-        self.cosmological_simulation = False
-
-        # These are parameters that I very much wish to get rid of.
-        self.parameters["HydroMethod"] = 'charm' # always PPM DE
-        self.parameters["DualEnergyFormalism"] = 0 
-        self.parameters["EOSType"] = -1 # default
-
-    def __del__(self):
-        self._handle.close()
-
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the parameter file
-        """
-        self.units = {}
-        self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2charmFieldsDict:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    def _parse_parameter_file(self):
-        
-        self.unique_identifier = \
-                               int(os.stat(self.parameter_filename)[ST_CTIME])
-        self.dimensionality = self._handle['Chombo_global/'].attrs['SpaceDim']
-        self.domain_left_edge = self.__calc_left_edge()
-        self.domain_right_edge = self.__calc_right_edge()
-        self.domain_dimensions = self.__calc_domain_dimensions()
-
-        if self.dimensionality == 1:
-            self._fieldinfo_fallback = Charm1DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0, 0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0, 1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1, 1]))
-
-        if self.dimensionality == 2:
-            self._fieldinfo_fallback = Charm2DFieldInfo
-            self.domain_left_edge = np.concatenate((self.domain_left_edge, [0.0]))
-            self.domain_right_edge = np.concatenate((self.domain_right_edge, [1.0]))
-            self.domain_dimensions = np.concatenate((self.domain_dimensions, [1]))
-        
-        self.refine_by = self._handle['/level_0'].attrs['ref_ratio']
-        self.periodicity = (True,) * self.dimensionality
-
-    def __calc_left_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        LE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        return LE
-
-    def __calc_right_edge(self):
-        fileh = self._handle
-        dx0 = fileh['/level_0'].attrs['dx']
-        D = self.dimensionality
-        RE = dx0*((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return RE
-
-    def __calc_domain_dimensions(self):
-        fileh = self._handle
-        D = self.dimensionality
-        L_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[0:D])
-        R_index = ((np.array(list(fileh['/level_0'].attrs['prob_domain'])))[D:] + 1)
-        return R_index - L_index
-
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        try:
-            fileh = h5py.File(args[0],'r')
-            valid = "Charm_global" in fileh["/"]
-            fileh.close()
-            return valid
-        except:
-            pass
-        return False
-
-    @parallel_root_only
-    def print_key_parameters(self):
-        for a in ["current_time", "domain_dimensions", "domain_left_edge",
-                  "domain_right_edge"]:
-            if not hasattr(self, a):
-                mylog.error("Missing %s in parameter file definition!", a)
-                continue
-            v = getattr(self, a)
-            mylog.info("Parameters: %-25s = %s", a, v)
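The deleted CharmStaticOutput illustrates the standard frontend-detection hook: _is_valid opens the candidate file and looks for a frontend-specific signature ("Charm_global" here, versus "Chombo_global" for Chombo). A compact sketch of that pattern, assuming h5py, with the bare except narrowed to I/O errors:

import h5py

def looks_like_charm(filename):
    # mirrors the _is_valid logic in the deleted class above
    try:
        with h5py.File(filename, 'r') as fileh:
            return "Charm_global" in fileh["/"]
    except (IOError, OSError):
        return False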

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/definitions.py
--- a/yt/frontends/charm/definitions.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                                 }
-
-charm2enzoDict = {"GAMMA": "Gamma",
-                  "Ref_ratio": "RefineBy"
-                                    }
-
-yt2charmFieldsDict = {}
-charm2ytFieldsDict = {}
-
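parameterDict maps Enzo-style parameter names to the callables used to cast their string values when a parameter file is parsed. A hypothetical helper (not from the commit) showing how such a cast table is typically applied:

def cast_parameter(name, raw_value, parameter_dict):
    """Hypothetical: cast a raw string using the table, defaulting to str."""
    return parameter_dict.get(name, str)(raw_value.strip())

# e.g. cast_parameter("Gamma", " 1.6667 ", parameterDict) -> 1.6667 (a float)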

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/fields.py
--- a/yt/frontends/charm/fields.py
+++ /dev/null
@@ -1,150 +0,0 @@
-"""
-Charm-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-import numpy as np
-
-CharmFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CharmFieldInfo.add_field
-
-KnownCharmFields = FieldInfoContainer()
-add_charm_field = KnownCharmFields.add_field
-
-add_charm_field("potential", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("potential")],
-                units=r"")
-
-add_charm_field("density", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("density")],
-                units=r"")
-
-add_charm_field("gravitational_field_x", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_x")],
-                units=r"")
-
-add_charm_field("gravitational_field_y", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_y")],
-                units=r"")
-
-add_charm_field("gravitational_field_z", function=NullFunc, take_log=False,
-                validators = [ValidateDataField("gravitational_field_z")],
-                units=r"")
-
-def _Density(field, data):
-    return data["density"]
-add_field("Density",function=_Density, take_log=True,
-          units=r'\rm{g}/\rm{cm^3}')
-
-def particle_func(p_field, dtype='float64'):
-    def _Particles(field, data):
-        io = data.hierarchy.io
-        if not data.NumberOfParticles > 0:
-            return np.array([], dtype=dtype)
-        else:
-            return io._read_particles(data, p_field).astype(dtype)
-        
-    return _Particles
-
-_particle_field_list = ["mass",
-                        "position_x",
-                        "position_y",
-                        "position_z",
-                        "velocity_x",
-                        "velocity_y",
-                        "velocity_z",
-                        "acceleration_x",
-                        "acceleration_y",
-                        "acceleration_z"]
-
-for pf in _particle_field_list:
-    pfunc = particle_func("%s" % (pf))
-    add_field("particle_%s" % pf, function=pfunc,
-              validators = [ValidateSpatial(0)],
-              particle_type=True)
-
-def _ParticleMass(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles
-
-def _ParticleMassMsun(field, data):
-    particles = data["particle_mass"].astype('float64')
-    return particles/1.989e33
-
-add_field("ParticleMass",
-          function=_ParticleMass, validators=[ValidateSpatial(0)],
-          particle_type=True)
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True)
-
-#do overrides for 2D
-
-Charm2DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_2d_field = Charm2DFieldInfo.add_field
-
-def _gravitational_field_z(field, data):
-    return np.zeros(data['gravitational_field_x'].shape,
-                    dtype='float64')
-add_charm_2d_field("gravitational_field_z", function=_gravitational_field_z)
-
-def _particle_position_z(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-add_charm_2d_field("particle_position_z", function=_particle_position_z)
-
-def _particle_velocity_z(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-add_charm_2d_field("particle_velocity_z", function=_particle_velocity_z)
-
-def _particle_acceleration_z(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-add_charm_2d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-#do overrides for 1D
-
-Charm1DFieldInfo = FieldInfoContainer.create_with_fallback(CharmFieldInfo)
-add_charm_1d_field = Charm1DFieldInfo.add_field
-
-def _gravitational_field_y(field, data):
-    return np.zeros(data['gravitational_field_y'].shape,
-                    dtype='float64')
-
-def _particle_position_y(field, data):
-    return np.zeros(data['particle_position_x'].shape, dtype='float64')
-
-def _particle_velocity_y(field, data):
-    return np.zeros(data['particle_velocity_x'].shape, dtype='float64')
-
-def _particle_acceleration_y(field, data):
-    return np.zeros(data['particle_acceleration_x'].shape, dtype='float64')
-
-add_charm_1d_field("gravitational_field_z", function=_gravitational_field_z)
-add_charm_1d_field("gravitational_field_y", function=_gravitational_field_y)
-
-add_charm_1d_field("particle_position_z", function=_particle_position_z)
-add_charm_1d_field("particle_velocity_z", function=_particle_velocity_z)
-add_charm_1d_field("particle_acceleration_z", function=_particle_acceleration_z)
-
-add_charm_1d_field("particle_position_y", function=_particle_position_y)
-add_charm_1d_field("particle_velocity_y", function=_particle_velocity_y)
-add_charm_1d_field("particle_acceleration_y", function=_particle_acceleration_y)
\ No newline at end of file
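The 1D/2D overrides above all follow one pattern: synthesize a missing vector component as zeros shaped like an existing one. The repetition could be collapsed with a small factory; a sketch (the helper name is invented):

import numpy as np

def _zeros_like_field(template):
    # return a field function that yields zeros shaped like `template`
    def _zero_field(field, data):
        return np.zeros(data[template].shape, dtype='float64')
    return _zero_field

# e.g. add_charm_2d_field("particle_velocity_z",
#                         function=_zeros_like_field("particle_velocity_x"))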

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/io.py
--- a/yt/frontends/charm/io.py
+++ /dev/null
@@ -1,127 +0,0 @@
-"""
-The data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-import h5py
-import os
-import re
-import numpy as np
-
-from yt.utilities.io_handler import \
-           BaseIOHandler
-
-class IOHandlerCharmHDF5(BaseIOHandler):
-    _data_style = "charm_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'position_z': 2,
-                                      'velocity_x': 3,
-                                      'velocity_y': 4,
-                                      'velocity_z': 5,
-                                      'acceleration_x': 6,
-                                      'acceleration_y': 7,
-                                      'acceleration_z': 8,
-                                      'mass': 9}
-
-    _field_dict = None
-    @property
-    def field_dict(self):
-        if self._field_dict is not None:
-            return self._field_dict
-        field_dict = {}
-        for key, val in self._handle.attrs.items():
-            if key.startswith('component_'):
-                comp_number = int(re.match('component_(\d)', key).groups()[0])
-                field_dict[val] = comp_number
-        self._field_dict = field_dict
-        return self._field_dict
-        
-    def _read_field_names(self, grid):
-        ncomp = int(self._handle['/'].attrs['num_components'])
-        fns = [c[1] for c in f['/'].attrs.items()[-ncomp-1:-1]]
-    
-    def _read_data(self,grid,field):
-
-        lstring = 'level_%i' % grid.Level
-        lev = self._handle[lstring]
-        dims = grid.ActiveDimensions
-        boxsize = dims.prod()
-        
-        grid_offset = lev[self._offset_string][grid._level_id]
-        start = grid_offset+self.field_dict[field]*boxsize
-        stop = start + boxsize
-        data = lev[self._data_string][start:stop]
-        
-        return data.reshape(dims, order='F')
-
-    def _read_particles(self, grid, name):
-
-        field_index = self._particle_field_index[name]
-        lev = 'level_%s' % grid.Level
-
-        particles_per_grid = self._handle[lev]['particles:offsets'].value
-        items_per_particle = len(self._particle_field_index)
-
-        # compute global offset position
-        offsets = items_per_particle * np.cumsum(particles_per_grid)
-        offsets = np.append(np.array([0]), offsets)
-        offsets = np.array(offsets, dtype=np.int64)
-
-        # convert between the global grid id and the id on this level            
-        grid_levels = np.array([g.Level for g in self.pf.h.grids])
-        grid_ids    = np.array([g.id    for g in self.pf.h.grids])
-        grid_level_offset = grid_ids[np.where(grid_levels == grid.Level)[0][0]]
-        lo = grid.id - grid_level_offset
-        hi = lo + 1
-
-        data = self._handle[lev]['particles:data'][offsets[lo]:offsets[hi]]
-        return data[field_index::items_per_particle]
-
-class IOHandlerCharm2DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm2d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'position_y': 1,
-                                      'velocity_x': 2,
-                                      'velocity_y': 3,
-                                      'acceleration_x': 4,
-                                      'acceleration_y': 5,
-                                      'mass': 6}
-
-
-class IOHandlerCharm1DHDF5(IOHandlerCharmHDF5):
-    _data_style = "charm1d_hdf5"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
-
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
-        self._particle_field_index = {'position_x': 0,
-                                      'velocity_x': 1,
-                                      'acceleration_x': 2,
-                                      'mass': 3}
\ No newline at end of file
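The offset arithmetic in _read_particles above is worth unpacking: particle records are stored flat, items_per_particle values per particle, grouped by grid. A small numpy sketch with made-up counts:

import numpy as np

particles_per_grid = np.array([2, 0, 5])   # made-up counts for one level
items_per_particle = 10                    # 10 fields in the 3D index above
offsets = np.append([0], items_per_particle * np.cumsum(particles_per_grid))
# offsets == [0, 20, 20, 70]; grid i owns the flat slice
# [offsets[i]:offsets[i+1]], and data[field_index::items_per_particle]
# then strides out a single field for every particle in that slice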

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/charm/setup.py
--- a/yt/frontends/charm/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('charm', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/chombo/io.py
--- a/yt/frontends/chombo/io.py
+++ b/yt/frontends/chombo/io.py
@@ -41,11 +41,7 @@
             self.ghost = np.array(self.ghost)
         except KeyError:
             # assume zero ghosts if outputGhosts not present
-<<<<<<< local
             self.ghost = np.zeros(self.dim)
-=======
-            self.ghost = np.array(self.dim)
->>>>>>> other
 
     _field_dict = None
     @property
@@ -83,11 +79,7 @@
         dims = grid.ActiveDimensions
         shape = grid.ActiveDimensions + 2*self.ghost
         boxsize = shape.prod()
-<<<<<<< local
 
-=======
-        
->>>>>>> other
         grid_offset = lev[self._offset_string][grid._level_id]
         start = grid_offset+self.field_dict[field]*boxsize
         stop = start + boxsize
@@ -187,46 +179,25 @@
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-<<<<<<< local
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
         self._handle = ds._handle
-=======
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf, *args, **kwargs)
-        self.pf = pf
-        self._handle = pf._handle
->>>>>>> other
         self.dim = 2
         self._read_ghost_info()
-<<<<<<< local
 
-=======
->>>>>>> other
 
 class IOHandlerChombo1DHDF5(IOHandlerChomboHDF5):
     _dataset_type = "chombo1d_hdf5"
     _offset_string = 'data:offsets=0'
     _data_string = 'data:datatype=0'
 
-<<<<<<< local
     def __init__(self, ds, *args, **kwargs):
         BaseIOHandler.__init__(self, ds, *args, **kwargs)
         self.ds = ds
-=======
-    def __init__(self, pf, *args, **kwargs):
-        BaseIOHandler.__init__(self, pf, *args, **kwargs)
-        self.pf = pf
->>>>>>> other
         self.dim = 1
-<<<<<<< local
         self._handle = ds._handle
-=======
-        self._handle = pf._handle   
->>>>>>> other
         self._read_ghost_info()
-<<<<<<< local
 
 
 class IOHandlerPlutoHDF5(IOHandlerChomboHDF5):
@@ -297,8 +268,6 @@
 
     return index
 
-=======
->>>>>>> other
 
 class IOHandlerOrion2HDF5(IOHandlerChomboHDF5):
     _dataset_type = "orion_chombo_native"

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/chombo/tests/test_outputs.py
--- a/yt/frontends/chombo/tests/test_outputs.py
+++ b/yt/frontends/chombo/tests/test_outputs.py
@@ -49,7 +49,7 @@
 _zp_fields = ("rhs", "phi", "gravitational_field_x",
               "gravitational_field_y")
 zp = "ZeldovichPancake/plt32.2d.hdf5"
-@requires_pf(zp)
+@requires_ds(zp)
 def test_zp():
     ds = data_dir_load(zp)
     yield assert_equal, str(ds), "plt32.2d.hdf5"

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -27,7 +27,6 @@
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
     config.add_subpackage("pluto")
-    config.add_subpackage("charm")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")

diff -r da689254a3be1762f3211a674731fb8f73b50bdc -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 yt/visualization/particle_plotter.py
--- a/yt/visualization/particle_plotter.py
+++ /dev/null
@@ -1,496 +0,0 @@
-"""
-This is a simple mechanism for interfacing with Particle plots
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-
-import __builtin__
-import base64
-import os
-import types
-
-from functools import wraps
-from itertools import izip
-import matplotlib
-import numpy as np
-import cStringIO
-
-from .base_plot_types import ImagePlotMPL
-from .plot_container import \
-    ImagePlotContainer, \
-    log_transform, linear_transform
-from yt.data_objects.profiles import \
-    create_profile
-from yt.utilities.exceptions import \
-    YTNotInsideNotebook
-from yt.utilities.logger import ytLogger as mylog
-import _mpl_imports as mpl
-from yt.funcs import \
-    ensure_list, \
-    get_image_suffix, \
-    get_ipython_api_version
-from yt.units.unit_object import Unit
-
-def get_canvas(name):
-    suffix = get_image_suffix(name)
-    
-    if suffix == '':
-        suffix = '.png'
-    if suffix == ".png":
-        canvas_cls = mpl.FigureCanvasAgg
-    elif suffix == ".pdf":
-        canvas_cls = mpl.FigureCanvasPdf
-    elif suffix in (".eps", ".ps"):
-        canvas_cls = mpl.FigureCanvasPS
-    else:
-        mylog.warning("Unknown suffix %s, defaulting to Agg", suffix)
-        canvas_cls = mpl.FigureCanvasAgg
-    return canvas_cls
-
-def invalidate_plot(f):
-    @wraps(f)
-    def newfunc(*args, **kwargs):
-        rv = f(*args, **kwargs)
-        args[0]._plot_valid = False
-        args[0]._setup_plots()
-        return rv
-    return newfunc
-
-class FigureContainer(dict):
-    def __init__(self):
-        super(FigureContainer, self).__init__()
-
-    def __missing__(self, key):
-        figure = mpl.matplotlib.figure.Figure((10, 8))
-        self[key] = figure
-        return self[key]
-
-class AxesContainer(dict):
-    def __init__(self, fig_container):
-        self.fig_container = fig_container
-        self.ylim = {}
-        super(AxesContainer, self).__init__()
-
-    def __missing__(self, key):
-        figure = self.fig_container[key]
-        self[key] = figure.add_subplot(111)
-        return self[key]
-
-    def __setitem__(self, key, value):
-        super(AxesContainer, self).__setitem__(key, value)
-        self.ylim[key] = (None, None)
-
-def sanitize_label(label, nprofiles):
-    label = ensure_list(label)
-    
-    if len(label) == 1:
-        label = label * nprofiles
-    
-    if len(label) != nprofiles:
-        raise RuntimeError("Number of labels must match number of profiles")
-
-    for l in label:
-        if l is not None and not isinstance(l, basestring):
-            raise RuntimeError("All labels must be None or a string")
-
-    return label
-
-class ParticlePlot(object):
-    r"""
-    Create a particle scatter plot from a data source.
-
-    Given a data object (all_data, region, sphere, etc.), an x field, 
-    and a y field (or fields), this will a scatter plot with one marker
-    for each particle.
-
-    Parameters
-    ----------
-    data_source : AMR3DData Object
-        The data object to be profiled, such as all_data, region, or 
-        sphere.
-    x_field : str
-        The field to plot on the x-axis.
-    y_fields : str
-        The field to plot on the y-axis.
-    plot_spec : dict or list of dicts
-        A dictionary or list of dictionaries containing plot keyword 
-        arguments.  For example, dict(color="blue", linestyle=".").
-        Default: None.
-
-    Examples
-    --------
-
-    This creates profiles of a single dataset.
-
-    >>> import yt
-    >>> ds = yt.load("enzo_tiny_cosmology/DD0046/DD0046")
-    >>> ad = ds.all_data()
-    >>> plot = ProfilePlot(ad, "density", ["temperature", "velocity_x"],
-    ...                    weight_field="cell_mass",
-    ...                    plot_spec=dict(color='red', linestyle="--"))
-    >>> plot.save()
-
-    This creates profiles from a time series object.
-
-    >>> es = yt.simulation("AMRCosmology.enzo", "Enzo")
-    >>> es.get_time_series()
-
-    >>> profiles = []
-    >>> labels = []
-    >>> plot_specs = []
-    >>> for ds in es[-4:]:
-    ...     ad = ds.all_data()
-    ...     profiles.append(create_profile(ad, ["density"],
-    ...                                    fields=["temperature",
-    ...                                            "velocity_x"]))
-    ...     labels.append(ds.current_redshift)
-    ...     plot_specs.append(dict(linestyle="--", alpha=0.7))
-    >>>
-    >>> plot = ProfilePlot.from_profiles(profiles, labels=labels,
-    ...                                  plot_specs=plot_specs)
-    >>> plot.save()
-
-    Use plot_line_property to change line properties of one or all profiles.
-    
-    """
-    x_log = None
-    y_log = None
-    z_log = None
-    x_title = None
-    y_title = None
-    _plot_valid = False
-
-    def __init__(self, data_source, x_field, y_field,
-                 plot_spec=None):
-
-        if plot_spec is None:
-            plot_spec = {'c':'b', 'marker':'.', 'linestyle':'None', 'markersize':8}
-
-        self.data_source = data_source
-        self.x_field = x_field
-        self.y_field = y_field
-        self.plot_spec = plot_spec
-
-        self.x_data = self.data_source[x_field]
-        self.y_data = self.data_source[y_field]
-        
-        self.figure = mpl.matplotlib.figure.Figure((10, 8))
-        self.axis = self.figure.add_subplot(111)
-        self._setup_plots()
-
-    def save(self, name=None):
-        r"""
-         Saves the scatter plot to disk.
-
-         Parameters
-         ----------
-         name : str
-             The output file keyword.
-
-         """
-        if not self._plot_valid:
-            self._setup_plots()
-        unique = set(self.figures.values())
-        if len(unique) < len(self.figures):
-            iters = izip(xrange(len(unique)), sorted(unique))
-        else:
-            iters = self.figures.iteritems()
-        if name is None:
-            if len(self.profiles) == 1:
-                prefix = self.profiles[0].ds
-            else:
-                prefix = "Multi-data"
-            name = "%s.png" % prefix
-        suffix = get_image_suffix(name)
-        prefix = name[:name.rfind(suffix)]
-        xfn = self.profiles[0].x_field
-        if isinstance(xfn, types.TupleType):
-            xfn = xfn[1]
-        if not suffix:
-            suffix = ".png"
-        canvas_cls = get_canvas(name)
-        fns = []
-        for uid, fig in iters:
-            if isinstance(uid, types.TupleType):
-                uid = uid[1]
-            canvas = canvas_cls(fig)
-            fns.append("%s_1d-Profile_%s_%s%s" % (prefix, xfn, uid, suffix))
-            mylog.info("Saving %s", fns[-1])
-            canvas.print_figure(fns[-1])
-        return fns
-
-    def show(self):
-        r"""This will send any existing plots to the IPython notebook.
-        function name.
-
-        If yt is being run from within an IPython session, and it is able to
-        determine this, this function will send any existing plots to the
-        notebook for display.
-
-        If yt can't determine if it's inside an IPython session, it will raise
-        YTNotInsideNotebook.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = ProfilePlot(ds.all_data(), 'density', 'temperature')
-        >>> pp.show()
-
-        """
-        if "__IPYTHON__" in dir(__builtin__):
-            api_version = get_ipython_api_version()
-            if api_version in ('0.10', '0.11'):
-                self._send_zmq()
-            else:
-                from IPython.display import display
-                display(self)
-        else:
-            raise YTNotInsideNotebook
-
-    def _repr_html_(self):
-        """Return an html representation of the plot object. Will display as a
-        png for each WindowPlotMPL instance in self.plots"""
-        ret = ''
-        canvas = mpl.FigureCanvasAgg(self.figure)
-        f = cStringIO.StringIO()
-        canvas.print_figure(f)
-        f.seek(0)
-        img = base64.b64encode(f.read())
-        ret += '<img src="data:image/png;base64,%s"><br>' % img
-        return ret
-
-    def _setup_plots(self):
-        self.axis.plot(np.array(self.x_data), np.array(self.y_data),
-                       **self.plot_spec)
-
-        xscale = self._get_field_log(self.x_field)
-        yscale = self._get_field_log(self.y_field)
-
-        xtitle = self._get_field_title(self.x_field)
-        ytitle = self._get_field_title(self.y_field)
-
-        self.axis.set_xscale(xscale)
-        self.axis.set_yscale(yscale)
-
-        self.axis.set_xlabel(xtitle)
-        self.axis.set_ylabel(ytitle)
-
-        self._plot_valid = True
-
-    @invalidate_plot
-    def set_line_property(self, property, value, index=None):
-        r"""
-        Set properties for one or all lines to be plotted.
-
-        Parameters
-        ----------
-        property : str
-            The line property to be set.
-        value : str, int, float
-            The value to set for the line property.
-        index : int
-            The index of the profile in the list of profiles to be 
-            changed.  If None, change all plotted lines.
-            Default : None.
-
-        Examples
-        --------
-
-        Change all the lines in a plot
-        plot.set_line_property("linestyle", "-")
-
-        Change a single line.
-        plot.set_line_property("linewidth", 4, index=0)
-        
-        """
-        if index is None:
-            specs = self.plot_spec
-        else:
-            specs = [self.plot_spec[index]]
-        for spec in specs:
-            spec[property] = value
-        return self
-
-    @invalidate_plot
-    def set_log(self, field, log):
-        """set a field to log or linear.
-
-        Parameters
-        ----------
-        field : string
-            the field to set a transform
-        log : boolean
-            Log on/off.
-        """
-        if field == "all":
-            self.x_log = log
-            for field in self.profiles[0].field_data.keys():
-                self.y_log[field] = log
-        else:
-            field, = self.profiles[0].data_source._determine_fields([field])
-            if field == self.profiles[0].x_field:
-                self.x_log = log
-            elif field in self.profiles[0].field_data:
-                self.y_log[field] = log
-            else:
-                raise KeyError("Field %s not in profile plot!" % (field))
-        return self
-
-    @invalidate_plot
-    def set_unit(self, field, unit):
-        """Sets a new unit for the requested field
-
-        Parameters
-        ----------
-        field : string
-           The name of the field that is to be changed.
-
-        new_unit : string or Unit object
-           The name of the new unit.
-        """
-        for profile in self.profiles:
-            if field == profile.x_field[1]:
-                profile.set_x_unit(unit)
-            elif field in self.profiles[0].field_map:
-                profile.set_field_unit(field, unit)
-            else:
-                raise KeyError("Field %s not in profile plot!" % (field))
-        return self
-
-    @invalidate_plot
-    def set_xlim(self, xmin=None, xmax=None):
-        """Sets the limits of the bin field
-
-        Parameters
-        ----------
-        
-        xmin : float or None
-          The new x minimum.  Defaults to None, which leaves the xmin
-          unchanged.
-
-        xmax : float or None
-          The new x maximum.  Defaults to None, which leaves the xmax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', 'temperature')
-        >>> pp.set_xlim(1e-29, 1e-24)
-        >>> pp.save()
-
-        """
-        for i, p in enumerate(self.profiles):
-            if xmin is None:
-                xmi = p.x_bins.min()
-            else:
-                xmi = xmin
-            if xmax is None:
-                xma = p.x_bins.max()
-            else:
-                xma = xmax
-            extrema = {p.x_field: ((xmi, str(p.x.units)), (xma, str(p.x.units)))}
-            units = {p.x_field: str(p.x.units)}
-            for field in p.field_map.values():
-                units[field] = str(p.field_data[field].units)
-            self.profiles[i] = \
-                create_profile(p.data_source, p.x_field,
-                               n_bins=len(p.x_bins)-2,
-                               fields=p.field_map.values(),
-                               weight_field=p.weight_field,
-                               accumulation=p.accumulation,
-                               fractional=p.fractional,
-                               extrema=extrema, units=units)
-        return self
-
-    @invalidate_plot
-    def set_ylim(self, field, ymin=None, ymax=None):
-        """Sets the plot limits for the specified field we are binning.
-
-        Parameters
-        ----------
-
-        field : string or field tuple
-
-        The field that we want to adjust the plot limits for.
-        
-        ymin : float or None
-          The new y minimum.  Defaults to None, which leaves the ymin
-          unchanged.
-
-        ymax : float or None
-          The new y maximum.  Defaults to None, which leaves the ymax
-          unchanged.
-
-        Examples
-        --------
-
-        >>> import yt
-        >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030')
-        >>> pp = yt.ProfilePlot(ds.all_data(), 'density', ['temperature', 'x-velocity'])
-        >>> pp.set_ylim('temperature', 1e4, 1e6)
-        >>> pp.save()
-
-        """
-        if field == 'all':
-            fields = self.axes.keys()
-        else:
-            fields = ensure_list(field)
-        for profile in self.profiles:
-            for field in profile.data_source._determine_fields(fields):
-                if field in profile.field_map:
-                    field = profile.field_map[field]
-                self.axes.ylim[field] = (ymin, ymax)
-                # Break after the first matching field so we move on
-                # to the next profile.
-                break
-        return self
-
-    def _get_field_log(self, field):
-        ds = self.data_source.ds
-        f, = self.data_source._determine_fields([field])
-        fi = ds._get_field_info(*f)
-        do_log = fi.take_log
-        scales = {True: 'log', False: 'linear'}
-        return scales[do_log]
-
-    def _get_field_label(self, field, field_info, field_unit, fractional=False):
-        field_unit = field_unit.latex_representation()
-        field_name = field_info.display_name
-        if isinstance(field, tuple): field = field[1]
-        if field_name is None:
-            field_name = r'$\rm{'+field.replace('_','\/').title()+r'}$'
-        elif field_name.find('$') == -1:
-            field_name = field_name.replace(' ','\/')
-            field_name = r'$\rm{'+field_name+r'}$'
-        if fractional:
-            label = field_name + r'$\rm{\/Probability\/Density}$'
-        elif field_unit is None or field_unit == '' or field_unit == '1':
-            label = field_name
-        else:
-            # field_unit was already converted to its LaTeX
-            # representation at the top of this method.
-            label = field_name+r'$\/\/('+field_unit+r')$'
-        return label
-
-    def _get_field_title(self, field):
-        ds = self.data_source.ds
-        f, = self.data_source._determine_fields([field])
-        fi = ds._get_field_info(*f)
-        field_unit = Unit(fi.units, registry=self.data_source.ds.unit_registry)
-        title = self._get_field_label(field, fi, field_unit)
-        return title


https://bitbucket.org/yt_analysis/yt/commits/7c48b53f10cb/
Changeset:   7c48b53f10cb
Branch:      yt
User:        ngoldbaum
Date:        2014-09-30 19:31:12+00:00
Summary:     Reverting one more change
Affected #:  1 file

diff -r d0e473b58537c3eda02971d9958cf6dc37ff60f4 -r 7c48b53f10cb25b0b2aa820706f5f54ec292e9ae yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -26,7 +26,6 @@
     config.add_subpackage("sph")
     config.add_subpackage("stream")
     config.add_subpackage("boxlib/tests")
-    config.add_subpackage("pluto")
     config.add_subpackage("flash/tests")
     config.add_subpackage("enzo/tests")
     config.add_subpackage("stream/tests")

Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.

