[yt-svn] commit/yt: 8 new changesets

commits-noreply at bitbucket.org
Sat Apr 19 17:10:19 PDT 2014


8 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/7cf523a36d10/
Changeset:   7cf523a36d10
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-07 15:19:50
Summary:     Adding ability to find Metallicity column fields in DID Gadget output.
Affected #:  1 file

diff -r 5e0ca94a1e4d1e1c583b1599ff694ee761a8a747 -r 7cf523a36d103d0cf619b424dcfa6f61d0391ec9 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -113,7 +113,9 @@
                     elif field in self._element_names:
                         rfield = 'ElementAbundance/' + field
                         data = g[rfield][:][mask,...]
-
+                    elif field.startswith("Metallicity_"):
+                        col = int(field.rsplit("_", 1)[-1])
+                        data = g["Metallicity"][:,col][mask]
                     else:
                         data = g[field][:][mask,...]
 
@@ -190,6 +192,10 @@
                     for j in gp.keys():
                         kk = j
                         fields.append((ptype, str(kk)))
+                elif k == 'Metallicity' and len(g[k].shape) > 1:
+                    # Vector of metallicity
+                    for i in range(g[k].shape[1]):
+                        fields.append((ptype, "Metallicity_%02i" % i))
                 else:
                     kk = k
                     if not hasattr(g[kk], "shape"): continue
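
For context, the two hunks above are the read path and the discovery path for 2D metallicity data. A minimal standalone sketch of the same logic, assuming a Gadget HDF5 snapshot (hypothetical filename) whose PartType0 group carries an (N, 11) Metallicity dataset as FIRE writes; the particle selector mask is omitted here:

    import h5py

    with h5py.File("snapshot_000.hdf5", "r") as f:  # hypothetical path
        g = f["PartType0"]
        metals = g["Metallicity"]
        # Discovery: a 2D array is exposed as one field per column.
        if len(metals.shape) > 1:
            fields = ["Metallicity_%02i" % i for i in range(metals.shape[1])]
        # Reading: "Metallicity_04" maps back to column 4.
        col = int("Metallicity_04".rsplit("_", 1)[-1])
        data = metals[:, col]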


https://bitbucket.org/yt_analysis/yt/commits/e454e62916e2/
Changeset:   e454e62916e2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-07 15:41:03
Summary:     Adding in FIRE fields.
Affected #:  1 file

diff -r 7cf523a36d103d0cf619b424dcfa6f61d0391ec9 -r e454e62916e223426bf83a98e8c7a25cfbe2bda1 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -63,10 +63,21 @@
         ("Metals", ("code_metallicity", ["metallicity"], None)),
         ("Phi", ("code_length", [], None)),
         ("FormationTime", ("code_time", ["creation_time"], None)),
+        # These are metallicity fields that get discovered for FIRE simulations
+        ("Metallicity_00", ("", ["metallicity"], None)),
+        ("Metallicity_01", ("", ["He_fraction"], None)),
+        ("Metallicity_02", ("", ["C_fraction"], None)),
+        ("Metallicity_03", ("", ["N_fraction"], None)),
+        ("Metallicity_04", ("", ["O_fraction"], None)),
+        ("Metallicity_05", ("", ["Ne_fraction"], None)),
+        ("Metallicity_06", ("", ["Mg_fraction"], None)),
+        ("Metallicity_07", ("", ["Si_fraction"], None)),
+        ("Metallicity_08", ("", ["S_fraction"], None)),
+        ("Metallicity_09", ("", ["Ca_fraction"], None)),
+        ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
 
-
 class TipsyFieldInfo(SPHFieldInfo):
 
     def __init__(self, pf, field_list, slice_info = None):
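
Each entry above follows the known_particle_fields convention of (on-disk name, (units, [aliases], display name)); the fractions are dimensionless, hence the empty units string. A sketch of the effect, assuming a FIRE snapshot (hypothetical filename) and the yt-3.0 namespace used elsewhere in this digest:

    from yt.mods import *

    pf = load("snapshot_440.hdf5")  # hypothetical FIRE output
    dd = pf.h.all_data()
    # The alias table makes the raw column and its species alias equivalent:
    print(dd["PartType0", "Metallicity_04"])
    print(dd["PartType0", "O_fraction"])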


https://bitbucket.org/yt_analysis/yt/commits/09847d4a00f3/
Changeset:   09847d4a00f3
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-07 15:53:15
Summary:     Adding metal mass fractions.
Affected #:  1 file

diff -r e454e62916e223426bf83a98e8c7a25cfbe2bda1 -r 09847d4a00f317315adccc2d49787a80f945208b yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -77,6 +77,14 @@
         ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
+    def setup_particle_fields(self, ptype):
+        super(SPHFieldInfo, self).setup_particle_fields(ptype)
+        for _, fname in self.field_aliases:
+            if _ != ptype: continue
+            if not fname.endswith("_fraction"): continue
+            element, _ = fname.split("_")
+            add_species_field_by_fraction(self, ptype, element,
+                                          particle_type=True)
 
 class TipsyFieldInfo(SPHFieldInfo):
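
A rough sketch (not the yt source) of what add_species_field_by_fraction derives from each discovered *_fraction field: a mass-density field built from the fraction, with mass and number-density fields following the same pattern:

    # Illustrative only; the real helper lives in yt.fields.species_fields.
    def add_density_from_fraction(registry, ptype, element):
        def _density(field, data):
            # element mass density = element fraction * SPH mass density
            return (data[ptype, "%s_fraction" % element] *
                    data[ptype, "density"])
        registry.add_field((ptype, "%s_density" % element),
                           function=_density,
                           particle_type=True,
                           units="g/cm**3")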
 


https://bitbucket.org/yt_analysis/yt/commits/3b80394058f0/
Changeset:   3b80394058f0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-07 16:09:47
Summary:     Merging with Britton's species work
Affected #:  6 files

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/fields/field_info_container.py
--- a/yt/fields/field_info_container.py
+++ b/yt/fields/field_info_container.py
@@ -61,6 +61,7 @@
         self.field_list = field_list
         self.slice_info = slice_info
         self.field_aliases = {}
+        self.species_names = []
         self.setup_fluid_aliases()
 
     def setup_fluid_fields(self):

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/fields/fluid_fields.py
--- a/yt/fields/fluid_fields.py
+++ b/yt/fields/fluid_fields.py
@@ -162,12 +162,22 @@
     registry.add_field((ftype, "metal_mass"),
                        function=_metal_mass,
                        units="g")
+
+    def _number_density(field, data):
+        field_data = np.zeros_like(data["gas", "%s_number_density" % \
+                                        data.pf.field_info.species_names[0]])
+        for species in data.pf.field_info.species_names:
+            field_data += data["gas", "%s_number_density" % species]
+        return field_data
+    registry.add_field((ftype, "number_density"),
+                       function = _number_density,
+                       units="cm**-3")
     
     def _mean_molecular_weight(field, data):
         return (data[ftype, "density"] / (mh * data[ftype, "number_density"]))
     registry.add_field((ftype, "mean_molecular_weight"),
               function=_mean_molecular_weight,
-              units=r"")
+              units="")
 
     setup_gradient_fields(registry, (ftype, "pressure"), "dyne/cm**2",
                           slice_info)
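
Toy numbers illustrating the two derived fields touched above: "number_density" sums the per-species number densities, and "mean_molecular_weight" then follows as rho / (mh * n):

    import numpy as np

    mh = 1.6737e-24                      # hydrogen mass [g]
    n_H = np.array([1.0e-1, 2.0e-1])     # species number densities [cm**-3]
    n_He = np.array([8.0e-3, 9.0e-3])
    n_total = n_H + n_He                 # the new "number_density" field
    rho = np.array([2.3e-25, 4.6e-25])   # gas density [g/cm**3]
    mu = rho / (mh * n_total)            # "mean_molecular_weight"
    print(n_total)
    print(mu)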

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/fields/interpolated_fields.py
--- /dev/null
+++ b/yt/fields/interpolated_fields.py
@@ -0,0 +1,49 @@
+"""
+Fields from interpolating data tables.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2014, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.fields.local_fields import add_field
+
+from yt.utilities.linear_interpolators import \
+    UnilinearFieldInterpolator, \
+    BilinearFieldInterpolator, \
+    TrilinearFieldInterpolator
+
+_int_class = {1: UnilinearFieldInterpolator,
+              2: BilinearFieldInterpolator,
+              3: TrilinearFieldInterpolator}
+
+def add_interpolated_field(name, units, table_data, axes_data, axes_fields,
+                           ftype="gas", particle_type=False, validators=None,
+                           truncate=True):
+    
+    if len(table_data.shape) not in _int_class:
+        raise RuntimeError("Interpolated field can only be created from 1d, 2d, or 3d data.")
+
+    if len(axes_fields) != len(axes_data) or len(axes_fields) != len(table_data.shape):
+        raise RuntimeError("Data dimension mismatch: data is %d, %d axes data provided, and %d axes fields provided." %
+                           (len(table_data.shape), len(axes_data), len(axes_fields)))
+
+    int_class = _int_class[len(table_data.shape)]
+    my_interpolator = int_class(table_data, axes_data, axes_fields, truncate=truncate)
+
+    def _interpolated_field(field, data):
+        return my_interpolator(data)
+    add_field((ftype, name), 
+              function=_interpolated_field,
+              units=units,
+              validators=validators, particle_type=particle_type)
+              
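
A hypothetical call to the new helper, interpolating a 2D table against density and temperature. The bin arrays, table values, and field choices are placeholders; note that axes_data must hold one bin array per table dimension so the length checks above pass, and the field registers with yt's local field registry for use once a dataset is loaded:

    import numpy as np
    from yt.fields.interpolated_fields import add_interpolated_field

    dens_bins = np.logspace(-30, -24, 64)  # axes_data: one array per axis
    temp_bins = np.logspace(3, 8, 64)
    table = np.random.random((64, 64))     # placeholder table_data

    add_interpolated_field("toy_emissivity", "erg/s/cm**3",
                           table, (dens_bins, temp_bins),
                           ["density", "temperature"],
                           ftype="gas", truncate=True)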

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -14,15 +14,22 @@
 #-----------------------------------------------------------------------------
 
 import numpy as np
+import re
 
 from yt.utilities.physical_constants import \
     mh, \
     mass_sun_cgs, \
     amu_cgs
+from yt.utilities.physical_ratios import \
+    primordial_H_mass_fraction
 from yt.funcs import *
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
 
+_primordial_mass_fraction = \
+  {"H": primordial_H_mass_fraction,
+   "He" : (1 - primordial_H_mass_fraction)}
+    
 # See YTEP-0003 for details, but we want to ensure these fields are all
 # populated:
 #
@@ -50,7 +57,7 @@
     weight *= amu_cgs
     def _number_density(field, data):
         return data[ftype, "%s_density" % species] \
-             / amu_cgs
+             / weight
     return _number_density
 
 def _create_density_func(ftype, species):
@@ -102,3 +109,53 @@
                        function = _create_number_density_func(ftype, species),
                        particle_type = particle_type,
                        units = "cm**-3")
+
+def add_nuclei_density_fields(registry, ftype,
+                              particle_type = False):
+    elements = _get_all_elements(registry.species_names)
+    for element in elements:
+        registry.add_field((ftype, "%s_nuclei_density" % element),
+                           function = _nuclei_density,
+                           particle_type = particle_type,
+                           units = "cm**-3")
+    if len(elements) == 0:
+        for element in ["H", "He"]:
+            registry.add_field((ftype, "%s_nuclei_density" % element),
+                               function = _default_nuclei_density,
+                               particle_type = particle_type,
+                               units = "cm**-3")
+
+def _default_nuclei_density(field, data):
+    element = field.name[1][:field.name[1].find("_")]
+    return data["gas", "density"] * _primordial_mass_fraction[element] / \
+      ChemicalFormula(element).weight / amu_cgs
+        
+def _nuclei_density(field, data):
+    element = field.name[1][:field.name[1].find("_")]
+    field_data = np.zeros_like(data["gas", "%s_number_density" % 
+                                    data.pf.field_info.species_names[0]])
+    for species in data.pf.field_info.species_names:
+        nucleus = species
+        if "_" in species:
+            nucleus = species[:species.find("_")]
+        num = _get_element_multiple(nucleus, element)
+        field_data += num * data["gas", "%s_number_density" % species]
+    return field_data
+
+def _get_all_elements(species_list):
+    elements = []
+    for species in species_list:
+        for item in re.findall('[A-Z][a-z]?|[0-9]+', species):
+            if not item.isdigit() and item not in elements \
+              and item != "El":
+                elements.append(item)
+    return elements
+    
+def _get_element_multiple(compound, element):
+    my_split = re.findall('[A-Z][a-z]?|[0-9]+', compound)
+    if element not in my_split:
+        return 0
+    loc = my_split.index(element)
+    if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
+        return 1
+    return int(my_split[loc + 1])
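
The two regex helpers split a species string such as "CO2" into element symbols and counts ("El", the electron placeholder, is excluded from the element list). A standalone illustration using the same regex and logic as _get_element_multiple:

    import re

    def count_element(compound, element):
        parts = re.findall('[A-Z][a-z]?|[0-9]+', compound)
        if element not in parts:
            return 0
        loc = parts.index(element)
        if loc == len(parts) - 1 or not parts[loc + 1].isdigit():
            return 1
        return int(parts[loc + 1])

    print(count_element("CO2", "O"))  # 2
    print(count_element("CO2", "C"))  # 1
    print(count_element("CO2", "H"))  # 0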

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -21,6 +21,7 @@
 from yt.units.yt_array import \
     YTArray
 from yt.fields.species_fields import \
+    add_nuclei_density_fields, \
     add_species_field_by_density
 from yt.utilities.physical_constants import \
     mh, me, mp, \
@@ -42,7 +43,7 @@
     'HM'      : 'H_m1',
     'DI'      : 'D',
     'DII'     : 'D_p1',
-    'HD'      : 'HD',
+    'HDI'     : 'HD',
     'Electron': 'El'
 }
 
@@ -119,8 +120,10 @@
                            take_log=True,
                            units="code_mass/code_length**3")
         yt_name = known_species_names[species]
-        self.alias(("gas", "%s_density" % yt_name),
-                   ("enzo", "%s_Density" % species))
+        # don't alias electron density since mass is wrong
+        if species != "Electron":
+            self.alias(("gas", "%s_density" % yt_name),
+                       ("enzo", "%s_Density" % species))
         add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
@@ -135,7 +138,8 @@
                        units = "g/cm**3")
         for sp in species_names:
             self.add_species_field(sp)
-
+            self.species_names.append(known_species_names[sp])
+        add_nuclei_density_fields(self, "gas")
 
     def setup_fluid_fields(self):
         # Now we conditionally load a few other things.
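
With species_names now populated during Enzo species setup, the nuclei density fields become available on the gas fluid type. A short usage sketch, reusing the cosmological Enzo dataset loaded elsewhere in this digest:

    from yt.mods import *

    pf = load('Enzo_64/DD0043/data0043')
    dd = pf.h.all_data()
    # Sums n_species times (atoms of the element per species) over species:
    print(dd["gas", "H_nuclei_density"])
    print(dd["gas", "He_nuclei_density"])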

diff -r 09847d4a00f317315adccc2d49787a80f945208b -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f yt/utilities/physical_ratios.py
--- a/yt/utilities/physical_ratios.py
+++ b/yt/utilities/physical_ratios.py
@@ -94,6 +94,7 @@
 jansky_cgs = 1.0e-23
 # Cosmological constants
 rho_crit_g_cm3_h2 = 1.8788e-29
+primordial_H_mass_fraction = 0.76
 
 # Misc. Approximations
 mass_mean_atomic_cosmology = 1.22


https://bitbucket.org/yt_analysis/yt/commits/5c14a1aba1da/
Changeset:   5c14a1aba1da
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-07 19:18:50
Summary:     Starting the species field plugin.
Affected #:  2 files

diff -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 yt/fields/species_fields.py
--- a/yt/fields/species_fields.py
+++ b/yt/fields/species_fields.py
@@ -25,11 +25,13 @@
 from yt.funcs import *
 from yt.utilities.chemical_formulas import \
     ChemicalFormula
+from .field_plugin_registry import \
+    register_field_plugin
 
 _primordial_mass_fraction = \
   {"H": primordial_H_mass_fraction,
    "He" : (1 - primordial_H_mass_fraction)}
-    
+
 # See YTEP-0003 for details, but we want to ensure these fields are all
 # populated:
 #
@@ -159,3 +161,20 @@
     if loc == len(my_split) - 1 or not my_split[loc + 1].isdigit():
         return 1
     return int(my_split[loc + 1])
+
+@register_field_plugin
+def setup_species_fields(registry, ftype = "gas", slice_info = None):
+    # We have to check what type of field this is -- if it's particles, then we
+    # set particle_type to True.
+    particle_type = ftype not in registry.pf.fluid_types
+    for species in registry.species_names:
+        # These are all the species we should be looking for fractions or
+        # densities of.
+        if (ftype, "%s_density" % species) in registry:
+            func = add_species_field_by_density
+        elif (ftype, "%s_fraction" % species) in registry:
+            func = add_species_field_by_fraction
+        else:
+            # Skip it
+            continue
+        func(registry, ftype, species, particle_type)

diff -r 3b80394058f0198ba01cdb1798cc9d589fcaed1f -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -31,7 +31,8 @@
 from yt.utilities.physical_constants import mh
 from yt.fields.species_fields import \
     add_species_field_by_fraction, \
-    add_species_field_by_density
+    add_species_field_by_density, \
+    setup_species_fields
 
 from yt.fields.particle_fields import \
     add_volume_weighted_smoothed_field
@@ -77,14 +78,16 @@
         ("Metallicity_10", ("", ["Fe_fraction"], None)),
     )
 
-    def setup_particle_fields(self, ptype):
-        super(SPHFieldInfo, self).setup_particle_fields(ptype)
-        for _, fname in self.field_aliases:
-            if _ != ptype: continue
-            if not fname.endswith("_fraction"): continue
-            element, _ = fname.split("_")
-            add_species_field_by_fraction(self, ptype, element,
-                                          particle_type=True)
+    def __init__(self, *args, **kwargs):
+        super(SPHFieldInfo, self).__init__(*args, **kwargs)
+        # Special case for FIRE
+        if ("PartType0", "Metallicity_00") in self.field_list:
+            self.species_names += ["He", "C", "N", "O", "Ne", "Mg", "Si", "S",
+                "Ca", "Fe"]
+
+    def setup_particle_fields(self, ptype, *args, **kwargs):
+        super(SPHFieldInfo, self).setup_particle_fields(ptype, *args, **kwargs)
+        setup_species_fields(self, ptype)
 
 class TipsyFieldInfo(SPHFieldInfo):
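
Net effect for FIRE data, sketched under the assumption of a snapshot carrying a ("PartType0", "Metallicity_00") column (hypothetical filename): __init__ seeds species_names, and the plugin then promotes each *_fraction alias into derived species fields:

    from yt.mods import *

    pf = load("snapshot_440.hdf5")  # hypothetical FIRE output
    dd = pf.h.all_data()
    # Derived by setup_species_fields from ("PartType0", "O_fraction"):
    print(dd["PartType0", "O_density"])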
 


https://bitbucket.org/yt_analysis/yt/commits/7e29fc83eb61/
Changeset:   7e29fc83eb61
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-17 13:02:00
Summary:     Merging from mainline
Affected #:  97 files

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5160,3 +5160,4 @@
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
 f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
 079e456c38a87676472a458210077e2be325dc85 last_gplv3
+f327552a6ede406b82711fb800ebcd5fe692d1cb yt-3.0a4

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -7,4 +7,9 @@
 include doc/extensions/README doc/Makefile
 prune doc/source/reference/api/generated
 prune doc/build/
+recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
+prune yt/frontends/_skeleton
+prune tests
+graft yt/gui/reason/html/resources
+exclude clean.sh .hgchurn
 recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/extensions/notebook_sphinxext.py
--- a/doc/extensions/notebook_sphinxext.py
+++ b/doc/extensions/notebook_sphinxext.py
@@ -15,8 +15,13 @@
     required_arguments = 1
     optional_arguments = 1
     option_spec = {'skip_exceptions' : directives.flag}
+    final_argument_whitespace = True
 
-    def run(self):
+    def run(self): # check if there are spaces in the notebook name
+        nb_path = self.arguments[0]
+        if ' ' in nb_path: raise ValueError(
+            "Due to issues with docutils stripping spaces from links, white "
+            "space is not allowed in notebook filenames '{0}'".format(nb_path))
         # check if raw html is supported
         if not self.state.document.settings.raw_enabled:
             raise self.warning('"%s" directive disabled.' % self.name)
@@ -24,10 +29,11 @@
         # get path to notebook
         source_dir = os.path.dirname(
             os.path.abspath(self.state.document.current_source))
-        nb_basename = os.path.basename(self.arguments[0])
+        nb_filename = self.arguments[0]
+        nb_basename = os.path.basename(nb_filename)
         rst_file = self.state_machine.document.attributes['source']
         rst_dir = os.path.abspath(os.path.dirname(rst_file))
-        nb_abs_path = os.path.join(rst_dir, nb_basename)
+        nb_abs_path = os.path.abspath(os.path.join(rst_dir, nb_filename))
 
         # Move files around.
         rel_dir = os.path.relpath(rst_dir, setup.confdir)
@@ -89,7 +95,6 @@
         return [nb_node]
 
 
-
 class notebook_node(nodes.raw):
     pass
 
@@ -109,6 +114,7 @@
     # http://imgur.com/eR9bMRH
     header = header.replace('<style', '<style scoped="scoped"')
     header = header.replace('body {\n  overflow: visible;\n  padding: 8px;\n}\n', '')
+    header = header.replace("code,pre{", "code{")
 
     # Filter out styles that conflict with the sphinx theme.
     filter_strings = [
@@ -120,8 +126,16 @@
     ]
     filter_strings.extend(['h%s{' % (i+1) for i in range(6)])
 
+    line_begin_strings = [
+        'pre{',
+        'p{margin'
+        ]
+
     header_lines = filter(
         lambda x: not any([s in x for s in filter_strings]), header.split('\n'))
+    header_lines = filter(
+        lambda x: not any([x.startswith(s) for s in line_begin_strings]), header_lines)
+
     header = '\n'.join(header_lines)
 
     # concatenate raw html lines

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/analyzing/units/1)_Symbolic_Units.ipynb
--- a/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
+++ b/doc/source/analyzing/units/1)_Symbolic_Units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:52f186664831f5290b31ec433114927b9771e224bd79d0c82dd3d9a8d9c09bf6"
+  "signature": "sha256:5d881061b9e82bd9df5d3598983c8ddc5fbec35e3bf7ae4524430dc558e27489"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -307,7 +307,7 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`:"
+      "For convenience, `unit_quantity` is also available via `uq` and `unit_array` is available via `ua`.  You can use these arrays to create dummy arrays with the same units as another array - this is sometimes easier than manually creating a new array or quantity."
      ]
     },
     {
@@ -402,11 +402,13 @@
       "\n",
       "print a/b\n",
       "print (a/b).in_cgs()\n",
+      "print (a/b).in_mks()\n",
       "print (a/b).in_units('km/s')\n",
       "print ''\n",
       "\n",
       "print a*b\n",
-      "print (a*b).in_cgs()"
+      "print (a*b).in_cgs()\n",
+      "print (a*b).in_mks()"
      ],
      "language": "python",
      "metadata": {},
@@ -433,7 +435,10 @@
       "from yt.utilities.physical_constants import G, kboltz\n",
       "\n",
       "print \"Newton's constant: \", G\n",
-      "print \"Boltzmann constant: \", kboltz"
+      "print \"Newton's constant in MKS: \", G.in_mks(), \"\\n\"\n",
+      "\n",
+      "print \"Boltzmann constant: \", kboltz\n",
+      "print \"Boltzmann constant in MKS: \", kboltz.in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
--- a/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
+++ b/doc/source/analyzing/units/2)_Data_Selection_and_fields.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:8e1a5db9e3869bcf761ff39c5a95d21458b7c4205f00da3d3f973d398422a466"
+  "signature": "sha256:9e7ac626b3609cf5f3fb2d4ebc6e027ed923ab1c22f0acc212e42fc7535e3205"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -73,6 +73,7 @@
       "mass = dd['cell_mass']\n",
       "\n",
       "print \"Cell Masses in CGS: \\n\", mass, \"\\n\"\n",
+      "print \"Cell Masses in MKS: \\n\", mass.in_mks(), \"\\n\"\n",
       "print \"Cell Masses in Solar Masses: \\n\", mass.in_units('Msun'), \"\\n\"\n",
       "print \"Cell Masses in code units: \\n\", mass.in_units('code_mass'), \"\\n\""
      ],
@@ -87,6 +88,7 @@
       "dx = dd['dx']\n",
       "print \"Cell dx in code units: \\n\", dx, \"\\n\"\n",
       "print \"Cell dx in centimeters: \\n\", dx.in_cgs(), \"\\n\"\n",
+      "print \"Cell dx in meters: \\n\", dx.in_units('m'), \"\\n\"\n",
       "print \"Cell dx in megaparsecs: \\n\", dx.in_units('Mpc'), \"\\n\""
      ],
      "language": "python",
@@ -109,8 +111,10 @@
       "\n",
       "* `in_units`\n",
       "* `in_cgs`\n",
+      "* `in_mks`\n",
       "* `convert_to_units`\n",
-      "* `convert_to_cgs`"
+      "* `convert_to_cgs`\n",
+      "* `convert_to_mks`"
      ]
     },
     {
@@ -134,15 +138,16 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "The second, `in_cgs`, returns a copy of the array converted into the base units of yt's CGS unit system:"
+      "`in_cgs` and `in_mks` return a copy of the array converted CGS and MKS units, respectively:"
      ]
     },
     {
      "cell_type": "code",
      "collapsed": false,
      "input": [
-      "print (dd['pressure']/dd['density'])\n",
-      "print (dd['pressure']/dd['density']).in_cgs()"
+      "print (dd['pressure'])\n",
+      "print (dd['pressure']).in_cgs()\n",
+      "print (dd['pressure']).in_mks()"
      ],
      "language": "python",
      "metadata": {},

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
--- a/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
+++ b/doc/source/analyzing/units/3)_Comoving_units_and_code_units.ipynb
@@ -1,7 +1,7 @@
 {
  "metadata": {
   "name": "",
-  "signature": "sha256:a07224c25b1d938bc1014b6d9d09c1a2392912f21b821b07615e65302677ef9b"
+  "signature": "sha256:242d7005d45a82744713bfe6389e49d47f39b524d1e7fcbf5ceb2e65dc473e68"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -20,77 +20,6 @@
      "level": 3,
      "metadata": {},
      "source": [
-      "The unit registry"
-     ]
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "When a dataset is loaded, we attempt to detect and assign conversion factors from the internal simulation coordinate system and the physical CGS system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.mods import *\n",
-      "\n",
-      "ds = load('Enzo_64/DD0043/data0043')\n",
-      "\n",
-      "ds.unit_registry"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "ds.unit_registry.lut"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "\n",
-      "It is not necessary to specify a unit registry when creating a new `YTArray` or `YTQuantity` since `yt` ships with a default unit registry:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "from yt.units.unit_object import default_unit_registry as reg\n",
-      "\n",
-      "unit_names = reg.lut.keys()\n",
-      "unit_names.sort()\n",
-      "\n",
-      "# Print out the first 10 unit names\n",
-      "for i in range(10):\n",
-      "    print unit_names[i], reg.lut[unit_names[i]]"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
-      "Each entry in the lookup table is the string name of a base unit and a tuple containing the CGS conversion factor and dimensions of the unit symbol."
-     ]
-    },
-    {
-     "cell_type": "heading",
-     "level": 3,
-     "metadata": {},
-     "source": [
       "Code units"
      ]
     },
@@ -98,25 +27,6 @@
      "cell_type": "markdown",
      "metadata": {},
      "source": [
-      "Some of the most interesting unit symbols are the ones for \"code\" units:"
-     ]
-    },
-    {
-     "cell_type": "code",
-     "collapsed": false,
-     "input": [
-      "code_unit_names = [un for un in unit_names if 'code_' in un]\n",
-      "\n",
-      "print code_unit_names"
-     ],
-     "language": "python",
-     "metadata": {},
-     "outputs": []
-    },
-    {
-     "cell_type": "markdown",
-     "metadata": {},
-     "source": [
       "Let's take a look at a cosmological enzo dataset to play with converting between physical units and code units:"
      ]
     },
@@ -132,13 +42,22 @@
      "outputs": []
     },
     {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "The conversion factors between Enzo's internal unit system and the physical CGS system are stored in the dataset's `unit_registry` object.  Code units have names like `code_length` and `code_time`. Let's take a look at the names of all of the code units, along with their CGS conversion factors for this cosmological enzo dataset:"
+     ]
+    },
+    {
      "cell_type": "code",
      "collapsed": false,
      "input": [
       "reg = ds.unit_registry\n",
       "\n",
-      "for un in code_unit_names:\n",
-      "    print un, reg.lut[un]"
+      "for un in reg.keys():\n",
+      "    if un.startswith('code_'):\n",
+      "        fmt_tup = (un, reg.lut[un][0], reg.lut[un][1])\n",
+      "        print \"Unit name:      {:<15}\\nCGS conversion: {:<15}\\nDimensions:     {:<15}\\n\".format(*fmt_tup)"
      ],
      "language": "python",
      "metadata": {},
@@ -295,6 +214,95 @@
      "language": "python",
      "metadata": {},
      "outputs": []
+    },
+    {
+     "cell_type": "heading",
+     "level": 3,
+     "metadata": {},
+     "source": [
+      "The unit registry"
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When you create a `YTArray` without referring to a unit registry, `yt` uses the default unit registry, which does not include code units or comoving units."
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = YTQuantity(3, 'cm')\n",
+      "\n",
+      "print a.units.registry.keys()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "When a dataset is loaded, `yt` infers conversion factors from the internal simulation unit system to the CGS unit system.  These conversion factors are stored in a `unit_registry` along with conversion factors to the other known unit symbols.  For the cosmological Enzo dataset we loaded earlier, we can see there are a number of additional unit symbols not defined in the default unit lookup table:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "print sorted([k for k in ds.unit_registry.keys() if k not in a.units.registry.keys()])"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "Since code units do not appear in the default unit symbol lookup table, one must explicitly refer to a unit registry when creating a `YTArray` to be able to convert to the unit system of a simulation."
+     ]
+    },
+    {
+     "cell_type": "markdown",
+     "metadata": {},
+     "source": [
+      "To make this as clean as possible, there are array and quantity-creating convenience functions attached to the `Dataset` object:\n",
+      "\n",
+      "* `ds.arr()`\n",
+      "* `ds.quan()`\n",
+      "\n",
+      "These functions make it straightforward to create arrays and quantities that can be converted to code units or comoving units.  For example:"
+     ]
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "a = ds.quan(3, 'code_length')\n",
+      "\n",
+      "print a\n",
+      "print a.in_cgs()\n",
+      "print a.in_units('Mpccm/h')"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
+    },
+    {
+     "cell_type": "code",
+     "collapsed": false,
+     "input": [
+      "b = ds.arr([3, 4, 5], 'Mpccm/h')\n",
+      "print b\n",
+      "print b.in_cgs()"
+     ],
+     "language": "python",
+     "metadata": {},
+     "outputs": []
     }
    ],
    "metadata": {}

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/conf.py
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -59,7 +59,7 @@
 master_doc = 'index'
 
 # General information about the project.
-project = u'yt'
+project = u'The yt Project'
 copyright = u'2013, the yt Project'
 
 # The version info for the project you're documenting, acts as replacement for
@@ -119,11 +119,16 @@
 # documentation.
 html_theme_options = dict(
     bootstrap_version = "3",
-    bootswatch_theme = "readable"
+    bootswatch_theme = "readable",
+    navbar_links = [
+        ("How to get help", "help/index"),
+        ("Bootcamp notebooks", "bootcamp/index"),
+        ("Cookbook", "cookbook/index"),
+        ],
+    navbar_sidebarrel = False,
+    globaltoc_depth = 2,
 )
 
-#html_style = "agogo_yt.css"
-
 # Add any paths that contain custom themes here, relative to this directory.
 #html_theme_path = []
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/amrkdtree_downsampling.py
--- a/doc/source/cookbook/amrkdtree_downsampling.py
+++ b/doc/source/cookbook/amrkdtree_downsampling.py
@@ -43,7 +43,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # This looks better.  Now let's try turning on opacity.
@@ -56,7 +56,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v3.png", clip_ratio=6.0)
 
 # This looks pretty good, now lets go back to the full resolution AMRKDTree

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/find_clumps.py
--- a/doc/source/cookbook/find_clumps.py
+++ b/doc/source/cookbook/find_clumps.py
@@ -19,8 +19,8 @@
 # Now we set some sane min/max values between which we want to find contours.
 # This is how we tell the clump finder what to look for -- it won't look for
 # contours connected below or above these threshold values.
-c_min = 10**na.floor(na.log10(data_source[field]).min()  )
-c_max = 10**na.floor(na.log10(data_source[field]).max()+1)
+c_min = 10**np.floor(np.log10(data_source[field]).min()  )
+c_max = 10**np.floor(np.log10(data_source[field]).max()+1)
 
 # keep only clumps with at least 20 cells
 function = 'self.data[\'%s\'].size > 20' % field

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/multi_plot_slice_and_proj.py
--- a/doc/source/cookbook/multi_plot_slice_and_proj.py
+++ b/doc/source/cookbook/multi_plot_slice_and_proj.py
@@ -1,4 +1,5 @@
 from yt.mods import * # set up our namespace
+from yt.visualization.base_plot_types import get_multi_plot
 import matplotlib.colorbar as cb
 from matplotlib.colors import LogNorm
 
@@ -18,7 +19,7 @@
 
 slc = pf.slice(2, 0.0, fields=["density","temperature","velocity_magnitude"], 
                  center=pf.domain_center)
-proj = pf.proj(2, "density", weight_field="density", center=pf.domain_center)
+proj = pf.proj("density", 2, weight_field="density", center=pf.domain_center)
 
 slc_frb = slc.to_frb((1.0, "mpc"), 512)
 proj_frb = proj.to_frb((1.0, "mpc"), 512)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/offaxis_projection.py
--- a/doc/source/cookbook/offaxis_projection.py
+++ b/doc/source/cookbook/offaxis_projection.py
@@ -31,4 +31,4 @@
 # relating to what our dataset is called.
 # We save the log of the values so that the colors do not span
 # many orders of magnitude.  Try it without and see what happens.
-write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+write_image(np.log10(image), "%s_offaxis_projection.png" % pf)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/opaque_rendering.py
--- a/doc/source/cookbook/opaque_rendering.py
+++ b/doc/source/cookbook/opaque_rendering.py
@@ -21,13 +21,13 @@
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5], colormap = 'RdBu_r')
 cam.snapshot("v1.png", clip_ratio=6.0)
 
-# In this case, the default alphas used (na.logspace(-3,0,Nbins)) does not
+# In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not
 # accentuate the outer regions of the galaxy. Let's start by bringing up the
 # alpha values for each contour to go between 0.1 and 1.0
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=na.logspace(0,0,4), colormap = 'RdBu_r')
+        alpha=np.logspace(0,0,4), colormap = 'RdBu_r')
 cam.snapshot("v2.png", clip_ratio=6.0)
 
 # Now let's set the grey_opacity to True.  This should make the inner portions
@@ -40,14 +40,14 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=10.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=10.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v4.png", clip_ratio=6.0)
 
 # Let's bump up again to see if we can obscure the inner contour.
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=30.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=30.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v5.png", clip_ratio=6.0)
 
 # Now we are losing sight of everything.  Let's see if we can obscure the next
@@ -55,7 +55,7 @@
 
 tf.clear()
 tf.add_layers(4, 0.01, col_bounds = [-27.5,-25.5],
-        alpha=100.0*na.ones(4,dtype='float64'), colormap = 'RdBu_r')
+        alpha=100.0*np.ones(4,dtype='float64'), colormap = 'RdBu_r')
 cam.snapshot("v6.png", clip_ratio=6.0)
 
 # That is very opaque!  Now lets go back and see what it would look like with

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/rendering_with_box_and_grids.py
--- a/doc/source/cookbook/rendering_with_box_and_grids.py
+++ b/doc/source/cookbook/rendering_with_box_and_grids.py
@@ -12,7 +12,7 @@
 
 # Create a transfer function to map field values to colors.
 # We bump up our minimum to cut out some of the background fluid
-tf = ColorTransferFunction((na.log10(mi)+2.0, na.log10(ma)))
+tf = ColorTransferFunction((np.log10(mi)+2.0, np.log10(ma)))
 
 # Add three guassians, evenly spaced between the min and
 # max specified above with widths of 0.02 and using the

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/save_profiles.py
--- a/doc/source/cookbook/save_profiles.py
+++ b/doc/source/cookbook/save_profiles.py
@@ -33,7 +33,7 @@
 # separate columns into separate NumPy arrays, it is essential to set unpack=True.
 
 r, dens, std_dens, temp, std_temp = \
-	na.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
+	np.loadtxt("sloshing_nomag2_hdf5_plt_cnt_0150_profile.dat", unpack=True)
 
 fig1 = plt.figure()
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/cookbook/simple_slice_matplotlib_example.py
--- a/doc/source/cookbook/simple_slice_matplotlib_example.py
+++ b/doc/source/cookbook/simple_slice_matplotlib_example.py
@@ -21,7 +21,7 @@
 rect = (0.2,0.2,0.2,0.2)
 new_ax = fig.add_axes(rect)
 
-n, bins, patches = new_ax.hist(na.random.randn(1000)+20, 50,
+n, bins, patches = new_ax.hist(np.random.randn(1000)+20, 50,
     facecolor='yellow', edgecolor='yellow')
 new_ax.set_xlabel('Dinosaurs per furlong')
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/developing/developing.rst
--- a/doc/source/developing/developing.rst
+++ b/doc/source/developing/developing.rst
@@ -379,7 +379,7 @@
    something_else``.  Python is more forgiving than C.
  * Avoid copying memory when possible. For example, don't do ``a =
    a.reshape(3,4)`` when ``a.shape = (3,4)`` will do, and ``a = a * 3`` should be
-   ``na.multiply(a, 3, a)``.
+   ``np.multiply(a, 3, a)``.
  * In general, avoid all double-underscore method names: ``__something`` is
    usually unnecessary.
  * Doc strings should describe input, output, behavior, and any state changes
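
The style-guide bullet above is about avoiding temporary copies; a small self-contained illustration of both in-place idioms:

    import numpy as np

    a = np.arange(12, dtype="float64")
    a.shape = (3, 4)       # reshape in place, no copy made
    np.multiply(a, 3, a)   # multiply in place, result written back into a
    print(a)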

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/installing.rst
--- a/doc/source/installing.rst
+++ b/doc/source/installing.rst
@@ -12,7 +12,7 @@
 computation engine, Matplotlib for some visualization tasks and Mercurial for
 version control.  Because installation of all of these interlocking parts can 
 be time-consuming, yt provides an installation script which downloads and builds
-a fully-isolated Python + Numpy + Matplotlib + HDF5 + Mercurial installation.  
+a fully-isolated Python + NumPy + Matplotlib + HDF5 + Mercurial installation.  
 yt supports Linux and OSX deployment, with the possibility of deployment on 
 other Unix-like systems (XSEDE resources, clusters, etc.).  Windows is not 
 supported.
@@ -86,16 +86,41 @@
 Alternative Installation Methods
 --------------------------------
 
-If you want to forego the use of the install script, you need to make sure 
-you have yt's dependencies installed on your system.  These include: a C compiler, 
-``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``numpy``, and 
-``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``)
-to install yt as:
+If you want to forego the use of the install script, you need to make sure you
+have yt's dependencies installed on your system.  These include: a C compiler,
+``HDF5``, ``Freetype``, ``libpng``, ``python``, ``cython``, ``NumPy``, and
+``matplotlib``.  From here, you can use ``pip`` (which comes with ``Python``) to
+install yt as:
 
 .. code-block:: bash
 
   $ pip install yt
 
+The source code for yt may be found at the Bitbucket project site and can also be
+utilized for installation. If you prefer to use it instead of relying on external
+tools, you will need ``mercurial`` to clone the official repo:
+
+.. code-block:: bash
+
+  $ hg clone https://bitbucket.org/yt_analysis/yt
+  $ cd yt
+  $ hg update yt
+  $ python setup.py install --user
+
+It will install yt into ``$HOME/.local/lib64/python2.7/site-packages``. 
+Please refer to ``setuptools`` documentation for the additional options.
+
+Provided that the required dependencies are in a predictable location, yt should
+be able to find them automatically. However, you can manually specify prefix used
+for installation of ``HDF5``, ``Freetype`` and ``libpng`` by using ``hdf5.cfg``,
+``freetype.cfg``, ``png.cfg`` or setting ``HDF5_DIR``, ``FTYPE_DIR``, ``PNG_DIR``
+environmental variables respectively, e.g.
+
+.. code-block:: bash
+
+  $ echo '/usr/local' > hdf5.cfg
+  $ export FTYPE_DIR=/opt/freetype
+
 If you choose this installation method, you do not need to run the activation
 script as it is unnecessary.
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/reference/api/api.rst
--- a/doc/source/reference/api/api.rst
+++ b/doc/source/reference/api/api.rst
@@ -709,8 +709,6 @@
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ObjectIterator
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelAnalysisInterface
    ~yt.utilities.parallel_tools.parallel_analysis_interface.ParallelObjectIterator
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ConstructedRootGrid
-   ~yt.analysis_modules.hierarchy_subset.hierarchy_subset.ExtractedHierarchy
 
 
 Testing Infrastructure

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
--- a/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
+++ b/doc/source/visualizing/TransferFunctionHelper_Tutorial.ipynb
@@ -1,6 +1,7 @@
 {
  "metadata": {
-  "name": ""
+  "name": "",
+  "signature": "sha256:d75e416150ccb017cfdf89973f8d4463e780da4d9bdc9a3783001d22021d9081"
  },
  "nbformat": 3,
  "nbformat_minor": 0,
@@ -154,7 +155,7 @@
       "Npixels = 512 \n",
       "cam = pf.h.camera(c, L, W, Npixels, tfh.tf, fields=['temperature'],\n",
       "                  north_vector=[1.,0.,0.], steady_north=True, \n",
-      "                  sub_samples=5, no_ghost=False, l_max=0)\n",
+      "                  sub_samples=5, no_ghost=False)\n",
       "\n",
       "# Here we substitute the TransferFunction we constructed earlier.\n",
       "cam.transfer_function = tfh.tf\n",

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/visualizing/_cb_docstrings.inc
--- a/doc/source/visualizing/_cb_docstrings.inc
+++ b/doc/source/visualizing/_cb_docstrings.inc
@@ -32,8 +32,8 @@
    data_source = pf.disk([0.5, 0.5, 0.5], [0., 0., 1.],
                            (8., 'kpc'), (1., 'kpc'))
 
-   c_min = 10**na.floor(na.log10(data_source['density']).min()  )
-   c_max = 10**na.floor(na.log10(data_source['density']).max()+1)
+   c_min = 10**np.floor(np.log10(data_source['density']).min()  )
+   c_max = 10**np.floor(np.log10(data_source['density']).max()+1)
 
    function = 'self.data[\'Density\'].size > 20'
    master_clump = Clump(data_source, None, 'density', function=function)

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 doc/source/visualizing/plots.rst
--- a/doc/source/visualizing/plots.rst
+++ b/doc/source/visualizing/plots.rst
@@ -254,7 +254,7 @@
    c = [0.5, 0.5, 0.5]
    N = 512
    image = off_axis_projection(pf, c, L, W, N, "density")
-   write_image(na.log10(image), "%s_offaxis_projection.png" % pf)
+   write_image(np.log10(image), "%s_offaxis_projection.png" % pf)
 
 Here, ``W`` is the width of the projection in the x, y, *and* z
 directions.

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -4,10 +4,9 @@
 from yt.analysis_modules.absorption_spectrum.absorption_line \
         import voigt
 
-
 def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
-        minError=1E-5, complexLim=.999,
-        fitLim=.99, minLength=3, 
+        minError=1E-4, complexLim=.995,
+        fitLim=.97, minLength=3, 
         maxLength=1000, splitLim=.99,
         output_file=None):
 
@@ -90,6 +89,7 @@
     fluxData[0]=1
     fluxData[-1]=1
 
+
     #Find all regions where lines/groups of lines are present
     cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
             complexLim=complexLim, minLength=minLength,
@@ -111,6 +111,7 @@
             yDatBounded=fluxData[b[1]:b[2]]
             yFitBounded=yFit[b[1]:b[2]]
 
+
             #Find init redshift
             z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
 
@@ -121,24 +122,33 @@
 
             #Fit Using complex tools
             newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
-                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+                    z,fitLim,minError,speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+               newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        minSize,minError)
+
+            if na.size(newLinesP)> 0:
+
+                #Check for EXPLOOOOSIIONNNSSS
+                newLinesP = _check_numerical_instability(x, newLinesP, speciesDict,b)
+
 
             #Check existence of partner lines if applicable
             if len(speciesDict['wavelength']) != 1:
                 newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
-                        b, minError*(b[2]-b[1]),
-                        x0, xRes, speciesDict)
+                        b, minError, x0, xRes, speciesDict)
 
-            #If flagged as a bad fit, species is lyman alpha,
-            #   and it may be a saturated line, use special tools
-            if flag and species=='lya' and min(yDatBounded)<.1:
-                newLinesP=_large_flag_fit(xBounded,yDatBounded,
-                        yFitBounded,z,speciesDict,
-                        minSize,minError*(b[2]-b[1]))
+
+
 
             #Adjust total current fit
             yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
 
+
             #Add new group to all fitted lines
             if na.size(newLinesP)>0:
                 speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
@@ -149,6 +159,7 @@
 
         allSpeciesLines[species]=speciesLines
 
+
     if output_file:
         _output_fit(allSpeciesLines, output_file)
 
@@ -205,10 +216,12 @@
     #Setup initial line guesses
     if initP==None: #Regular fit
         initP = [0,0,0] 
-        if min(yDat)<.5: #Large lines get larger initial guess 
-            initP[0] = 10**16
+        if min(yDat)<.01: #Large lines get larger initial guess 
+            initP[0] = speciesDict['init_N']*10**2
+        elif min(yDat)<.5:
+            initP[0] = speciesDict['init_N']*10**1
         elif min(yDat)>.9: #Small lines get smaller initial guess
-            initP[0] = 10**12.5
+            initP[0] = speciesDict['init_N']*10**-1
         else:
             initP[0] = speciesDict['init_N']
         initP[1] = speciesDict['init_b']
@@ -225,9 +238,16 @@
         return [],False
     
     #Values to proceed through first run
-    errSq,prevErrSq=1,1000
+    errSq,prevErrSq,prevLinesP=1,10*len(x),[]
 
+    if errBound == None:
+        errBound = len(yDat)*(max(1-yDat)*1E-2)**2
+    else:
+        errBound = errBound*len(yDat)
+
+    flag = False
     while True:
+
         #Initial parameter guess from joining parameters from all lines
         #   in lines into a single array
         initP = linesP.flatten()
@@ -237,6 +257,7 @@
                 args=(x,yDat,yFit,speciesDict),
                 epsfcn=1E-10,maxfev=1000)
 
+
         #Set results of optimization
         linesP = na.reshape(fitP,(-1,3))
 
@@ -247,17 +268,23 @@
         #Sum to get idea of goodness of fit
         errSq=sum(dif**2)
 
+        if any(linesP[:,1]==speciesDict['init_b']):
+         #   linesP = prevLinesP
+
+            flag = True
+            break
+            
         #If good enough, break
-        if errSq < errBound: 
+        if errSq < errBound:        
             break
 
         #If last fit was worse, reject the last line and revert to last fit
-        if errSq > prevErrSq*10:
+        if errSq > prevErrSq*10 :
             #If its still pretty damn bad, cut losses and try flag fit tools
             if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                linesP = prevLinesP
                 break
 
         #If too many lines 
@@ -266,21 +293,26 @@
             if errSq >1E2*errBound and speciesDict['name']=='HI lya':
                 return [],True
             else:
-                break 
+                flag = True
+                break
 
         #Store previous data in case reject next fit
         prevErrSq = errSq
         prevLinesP = linesP
 
-
         #Set up initial condition for new line
         newP = [0,0,0] 
-        if min(dif)<.1:
-            newP[0]=10**12
-        elif min(dif)>.9:
-            newP[0]=10**16
+
+        yAdjusted = 1+yFit*yNewFit-yDat
+ 
+        if min(yAdjusted)<.01: #Large lines get larger initial guess 
+            newP[0] = speciesDict['init_N']*10**2
+        elif min(yAdjusted)<.5:
+            newP[0] = speciesDict['init_N']*10**1
+        elif min(yAdjusted)>.9: #Small lines get smaller initial guess
+            newP[0] = speciesDict['init_N']*10**-1
         else:
-            newP[0]=10**14
+            newP[0] = speciesDict['init_N']
         newP[1] = speciesDict['init_b']
         newP[2]=(x[dif.argmax()]-wl0)/wl0
         linesP=na.append(linesP,[newP],axis=0)
@@ -290,12 +322,12 @@
     #   acceptable range, as given in dict ref
     remove=[]
     for i,p in enumerate(linesP):
-        check=_check_params(na.array([p]),speciesDict)
+        check=_check_params(na.array([p]),speciesDict,x)
         if check: 
             remove.append(i)
     linesP = na.delete(linesP,remove,axis=0)
 
-    return linesP,False
+    return linesP,flag
 
 def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
     """
@@ -489,6 +521,9 @@
     #List of lines to remove
     removeLines=[]
 
+    #Set error
+
+
     #Iterate through all sets of line parameters
     for i,p in enumerate(linesP):
 
@@ -501,16 +536,23 @@
             lb = _get_bounds(p[2],b,wl,x0,xRes)
             xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
 
+            if errBound == None:
+                errBound = 10*len(yb)*(max(1-yb)*1E-2)**2
+            else:
+                errBound = 10*errBound*len(yb)
+
             #Generate a fit and find the difference to data
             yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
             dif =yb-yFitb
 
+
+
             #Only counts as an error if line is too big ---------------<
             dif = [k for k in dif if k>0]
             err = sum(dif)
 
             #If the fit is too bad then add the line to list of removed lines
-            if err > errBound*1E2:
+            if err > errBound:
                 removeLines.append(i)
                 break
 
@@ -640,21 +682,13 @@
         #Check if the region needs to be divided
         if b[2]-b[1]>maxLength:
 
-            #Find the minimum absorption in the middle two quartiles of
-            #   the large complex
-            q=(b[2]-b[1])/4
-            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+            split = _split_region(yDat,b,splitLim)
 
-            #Only break it up if the minimum absorption is actually low enough
-            if yDat[cut]>splitLim:
-
-                #Get the new two peaks
-                b1Peak = yDat[b[1]:cut].argmin()+b[1]
-                b2Peak = yDat[cut:b[2]].argmin()+cut
+            if split:
 
                 #add the two regions separately
-                cBounds.insert(i+1,[b1Peak,b[1],cut])
-                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+                cBounds.insert(i+1,split[0])
+                cBounds.insert(i+2,split[1])
 
                 #Remove the original region
                 cBounds.pop(i)
@@ -663,7 +697,33 @@
 
     return cBounds
 
-def _gen_flux_lines(x, linesP, speciesDict):
+
+def _split_region(yDat,b,splitLim):
+        #Find the minimum absorption in the middle two quartiles of
+    #   the large complex
+
+    q=(b[2]-b[1])/4
+    cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+    #Only break it up if the minimum absorption is actually low enough
+    if yDat[cut]>splitLim:
+
+        #Get the new two peaks
+        b1Peak = yDat[b[1]:cut].argmin()+b[1]
+        b2Peak = yDat[cut:b[2]].argmin()+cut
+
+        region_1 = [b1Peak,b[1],cut]
+        region_2 = [b2Peak,cut,b[2]]
+
+        return [region_1,region_2]
+
+    else:
+
+        return []
+
+
+
+def _gen_flux_lines(x, linesP, speciesDict,firstLine=False):
     """
     Calculates the normalized flux for a region of wavelength space
     generated by a set of absorption lines.
@@ -692,6 +752,9 @@
             g=speciesDict['Gamma'][i]
             wl=speciesDict['wavelength'][i]
             y = y+ _gen_tau(x,p,f,g,wl)
+            if firstLine: 
+                break
+
     flux = na.exp(-y)
     return flux
 
@@ -744,21 +807,25 @@
         the difference between the fit generated by the parameters
         given in pTotal multiplied by the previous fit and the desired
         flux profile, w/ first index modified appropriately for bad 
-        parameter choices
+        parameter choices and additional penalty for fitting with a lower
+        flux than observed.
     """
 
     pTotal.shape = (-1,3)
     yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
 
     error = yDat-yFit*yNewFit
-    error[0] = _check_params(pTotal,speciesDict)
+    error_plus = (yDat-yFit*yNewFit).clip(min=0)
+
+    error = error+error_plus
+    error[0] = _check_params(pTotal,speciesDict,x)
 
     return error
 
-def _check_params(p, speciesDict):
+def _check_params(p, speciesDict,xb):
     """
     Check to see if any of the parameters in p fall outside the range 
-        given in speciesDict.
+        given in speciesDict or on the boundaries
 
     Parameters
     ----------
@@ -767,6 +834,8 @@
     speciesDict : dictionary
         dictionary with properties giving the max and min
         values appropriate for each parameter N,b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
 
     Returns
     -------
@@ -774,16 +843,137 @@
         0 if all values are fine
         999 if any values fall outside acceptable range
     """
+
+    minz = (xb[0])/speciesDict['wavelength'][0]-1
+    maxz = (xb[-1])/speciesDict['wavelength'][0]-1
+
     check = 0
-    if any(p[:,0] > speciesDict['maxN']) or\
-          any(p[:,0] < speciesDict['minN']) or\
-          any(p[:,1] > speciesDict['maxb']) or\
-          any(p[:,1] < speciesDict['minb']) or\
-          any(p[:,2] > speciesDict['maxz']) or\
-          any(p[:,2] < speciesDict['minz']):
+    if any(p[:,0] >= speciesDict['maxN']) or\
+          any(p[:,0] <= speciesDict['minN']) or\
+          any(p[:,1] >= speciesDict['maxb']) or\
+          any(p[:,1] <= speciesDict['minb']) or\
+          any(p[:,2] >= maxz) or\
+          any(p[:,2] <= minz):
               check = 999
+              
     return check
 
+def _check_optimization_init(p,speciesDict,initz,xb,yDat,yFit,minSize,errorBound):
+
+    """
+    Check to see if any of the parameters in p are the
+    same as the initial parameters and, if so, attempt to
+    split the region and refit it.
+
+    Parameters
+    ----------
+    p : (N,3) ndarray
+        array with form [[N1, b1, z1], ...]
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N, b, and z.
+    xb : (N) ndarray
+        wavelength array [nm]
+    """
+
+    # Check if anything is a default parameter
+    if any(p[:,0] == speciesDict['init_N']) or\
+          any(p[:,0] == speciesDict['init_N']*10) or\
+          any(p[:,0] == speciesDict['init_N']*100) or\
+          any(p[:,0] == speciesDict['init_N']*.1) or\
+          any(p[:,1] == speciesDict['init_b']) or\
+          any(p[:,1] == speciesDict['maxb']):
+
+            # These are the initial bounds
+            init_bounds = [yDat.argmin(),0,len(xb)-1]
+
+            # Gratuitous limit for splitting the region
+            newSplitLim = 1 - (1-min(yDat))*.5
+
+            # Attempt to split region
+            split = _split_region(yDat,init_bounds,newSplitLim)
+            
+            # If we can't split it, just reject it. It's unphysical
+            # to just keep the default parameters and we're out of
+            # options at this point
+            if not split:
+                return []
+
+            # Else set up the bounds for each region and fit separately
+            b1,b2 = split[0][2], split[1][1]
+
+            p1,flag = _complex_fit(xb[:b1], yDat[:b1], yFit[:b1],
+                            initz, minSize, errorBound, speciesDict)
+
+            p2,flag = _complex_fit(xb[b2:], yDat[b2:], yFit[b2:],
+                            initz, minSize, errorBound, speciesDict)
+
+            # Make the final line parameters. It's annoying because
+            # one or both regions may have fit to nothing
+            if na.size(p1)> 0 and na.size(p2)>0:
+                p = na.r_[p1,p2]
+            elif na.size(p1) > 0:
+                p = p1
+            else:
+                p = p2
+
+    return p
+
+
+def _check_numerical_instability(x, p, speciesDict,b):
+
+    """
+    Check to see if any of the parameters in p are causing
+    unstable numerical effects outside the region of fit
+
+    Parameters
+    ----------
+    p : (N,3) ndarray
+        array with form [[N1, b1, z1], ...]
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+    x : (N) ndarray
+        wavelength array [nm]
+    b : (3) list
+        list of integers indicating bounds of region fit in x
+    """
+
+    remove_lines = []
+
+
+    for i,line in enumerate(p):
+
+        # First, check if the line is at risk of instability
+        if line[1]<5 or line[0] < 1E12:
+
+
+            # get all flux that isn't part of the fit, plus a little wiggle
+            # room; the max and min below prevent boundary errors
+
+            flux = _gen_flux_lines(x,[line],speciesDict,firstLine=True)
+            flux = na.r_[flux[:max(b[1]-10,0)], flux[min(b[2]+10,len(x)):]]
+
+            #Find regions that are absorbing outside the region we fit
+            flux_dif = 1 - flux
+            absorbing_coefficient = max(abs(flux_dif))
+
+
+            #Really there shouldn't be any absorption outside
+            #the region we fit, but we'll give some leeway:
+            #for high-resolution spectra the tiny bits on the edges
+            #can give a non-negligible amount of flux. Plus the errors
+            #we are looking for are HUGE.
+            if absorbing_coefficient > .1:
+
+                # we just flag the line for removal because we've tried
+                # everything else at this point. This region just sucks :(
+                remove_lines.append(i)
+    
+    if remove_lines:
+        p = na.delete(p, remove_lines, axis=0)
+
+    return p
 
 def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
     """
@@ -815,4 +1005,5 @@
         f.create_dataset("{0}/z".format(ion),data=params['z'])
         f.create_dataset("{0}/complex".format(ion),data=params['group#'])
     print 'Writing spectrum fit to {0}'.format(file_name)
+    f.close()
 

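The logic hoisted into _split_region above decides whether an absorption
complex should be cut in two at its shallowest interior point (the flux
maximum in the middle two quartiles). Below is a self-contained restatement
of that rule for illustration, not the yt call itself, run on a synthetic
two-trough flux array, with integer division made explicit:

    import numpy as np

    def split_region(flux, b, split_lim):
        # Flux maximum in the middle two quartiles == absorption minimum
        q = (b[2] - b[1]) // 4
        cut = flux[b[1] + q:b[2] - q].argmax() + b[1] + q
        if flux[cut] > split_lim:
            # Deepest point (flux minimum) on each side is the new peak
            p1 = flux[b[1]:cut].argmin() + b[1]
            p2 = flux[cut:b[2]].argmin() + cut
            return [[p1, b[1], cut], [p2, cut, b[2]]]
        return []

    x = np.linspace(0.0, 1.0, 200)
    flux = 1 - 0.8 * np.exp(-(x - 0.3) ** 2 / 1e-3) \
             - 0.6 * np.exp(-(x - 0.7) ** 2 / 1e-3)
    print(split_region(flux, [flux.argmin(), 0, len(flux) - 1], 0.95))

The two troughs are separated by a near-continuum bridge, so the region is
split there and each sub-region gets its own deepest point as its peak.
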
diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -63,12 +63,6 @@
     HaloProfiler, \
     FakeProfile
 
-from .hierarchy_subset.api import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile
-
 from .level_sets.api import \
     identify_contours, \
     Clump, \

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -129,7 +129,7 @@
         """
         if self.CoM is not None:
             return self.CoM
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         c = {}
         # We shift into a box where the origin is the left edge
         c[0] = self["particle_position_x"] - self.pf.domain_left_edge[0]
@@ -199,7 +199,7 @@
         """
         if self.group_total_mass is not None:
             return self.group_total_mass
-        return self["ParticleMassMsun"].sum()
+        return self["particle_mass"].in_units('Msun').sum()
 
     def bulk_velocity(self):
         r"""Returns the mass-weighted average velocity in cm/s.
@@ -213,7 +213,7 @@
         """
         if self.bulk_vel is not None:
             return self.bulk_vel
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
@@ -234,7 +234,7 @@
         if self.rms_vel is not None:
             return self.rms_vel
         bv = self.bulk_velocity()
-        pm = self["ParticleMassMsun"]
+        pm = self["particle_mass"].in_units('Msun')
         sm = pm.sum()
         vx = (self["particle_velocity_x"] - bv[0]) * pm / sm
         vy = (self["particle_velocity_y"] - bv[1]) * pm / sm
@@ -331,7 +331,7 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["ParticleMassMsun"]:
+                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
         if 'creation_time' in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
@@ -464,7 +464,7 @@
         if self["particle_position_x"].size > 1:
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                np.sum(self["ParticleMassMsun"][inds == index])
+                np.sum(self["particle_mass"][inds == index]).in_units('Msun')
         # Now forward sum the masses in the bins.
         for i in xrange(self.bin_count):
             self.mass_bins[i + 1] += self.mass_bins[i]
@@ -750,7 +750,7 @@
             inds = np.digitize(dist, self.radial_bins) - 1
             for index in np.unique(inds):
                 self.mass_bins[index] += \
-                    np.sum(self["ParticleMassMsun"][inds == index])
+                    np.sum(self["particle_mass"][inds == index]).in_units('Msun')
             # Now forward sum the masses in the bins.
             for i in xrange(self.bin_count):
                 self.mass_bins[i + 1] += self.mass_bins[i]
@@ -1356,7 +1356,7 @@
     _name = "HOP"
     _halo_class = HOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun"]
+              ["particle_mass"]
 
     def __init__(self, data_source, threshold=160.0, dm_only=True):
         self.threshold = threshold
@@ -1368,7 +1368,7 @@
             RunHOP(self.particle_fields["particle_position_x"] / self.period[0],
                 self.particle_fields["particle_position_y"] / self.period[1],
                 self.particle_fields["particle_position_z"] / self.period[2],
-                self.particle_fields["ParticleMassMsun"],
+                self.particle_fields["particle_mass"].in_units('Msun'),
                 self.threshold)
         self.particle_fields["densities"] = self.densities
         self.particle_fields["tags"] = self.tags
@@ -1555,7 +1555,7 @@
     _name = "parallelHOP"
     _halo_class = parallelHOPHalo
     _fields = ["particle_position_%s" % ax for ax in 'xyz'] + \
-              ["ParticleMassMsun", "particle_index"]
+              ["particle_mass", "particle_index"]
 
     def __init__(self, data_source, padding, num_neighbors, bounds, total_mass,
         period, threshold=160.0, dm_only=True, rearrange=True, premerge=True,
@@ -1589,8 +1589,8 @@
 
         self.comm.mpi_exit_test(exit)
         # Try to do this in a memory conservative way.
-        np.divide(self.particle_fields['ParticleMassMsun'], self.total_mass,
-            self.particle_fields['ParticleMassMsun'])
+        np.divide(self.particle_fields['particle_mass'].in_units('Msun'), self.total_mass,
+            self.particle_fields['particle_mass'])
         np.divide(self.particle_fields["particle_position_x"],
             self.old_period[0], self.particle_fields["particle_position_x"])
         np.divide(self.particle_fields["particle_position_y"],
@@ -2190,7 +2190,7 @@
         # Now we get the full box mass after we have the final composition of
         # subvolumes.
         if total_mass is None:
-            total_mass = self.comm.mpi_allreduce((self._data_source["ParticleMassMsun"].astype('float64')).sum(),
+            total_mass = self.comm.mpi_allreduce((self._data_source["particle_mass"].in_units('Msun').astype('float64')).sum(),
                                                  op='sum')
         if not self._distributed:
             self.padding = (np.zeros(3, dtype='float64'),
@@ -2386,9 +2386,9 @@
             if dm_only:
                 select = self._get_dm_indices()
                 total_mass = \
-                    self.comm.mpi_allreduce((self._data_source['all', "ParticleMassMsun"][select]).sum(dtype='float64'), op='sum')
+                    self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0], op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2409,10 +2409,10 @@
             sub_mass = total_mass
         elif dm_only:
             select = self._get_dm_indices()
-            sub_mass = self._data_source["ParticleMassMsun"][select].sum(dtype='float64')
+            sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("ParticleMassMsun")[0]
+                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

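The ParticleMassMsun replacements in this file lean on yt-3.0's unit-aware
arrays: particle masses now come back as a YTArray carrying units, and the
Msun conversion is made explicit at the point of use. A minimal sketch of
the new pattern, with a placeholder dataset path:

    import yt

    pf = yt.load("DD0010/moving7_0010")   # placeholder dataset path
    dd = pf.all_data()
    pm = dd["particle_mass"]              # YTArray; knows its own units
    print(pm.in_units("Msun").sum())      # explicit conversion replaces
                                          # the old ParticleMassMsun field
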
diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx
@@ -11,234 +11,232 @@
 # Importing relevant rockstar data types particle, fof halo, halo
 
 cdef import from "particle.h":
-	struct particle:
-		np.int64_t id
-		float pos[6]
+    struct particle:
+        np.int64_t id
+        float pos[6]
 
 cdef import from "fof.h":
-	struct fof:
-		np.int64_t num_p
-		particle *particles
+    struct fof:
+        np.int64_t num_p
+        particle *particles
 
 cdef import from "halo.h":
-	struct halo:
-		np.int64_t id
-		float pos[6], corevel[3], bulkvel[3]
-		float m, r, child_r, vmax_r, mgrav,	vmax, rvmax, rs, klypin_rs, vrms
-		float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
-		float bullock_spin, kin_to_pot
-		np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
-		float min_pos_err, min_vel_err, min_bulkvel_err
+    struct halo:
+        np.int64_t id
+        float pos[6], corevel[3], bulkvel[3]
+        float m, r, child_r, vmax_r, mgrav, vmax, rvmax, rs, klypin_rs, vrms
+        float J[3], energy, spin, alt_m[4], Xoff, Voff, b_to_a, c_to_a, A[3]
+        float bullock_spin, kin_to_pot
+        np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+        float min_pos_err, min_vel_err, min_bulkvel_err
 
 # For finding sub halos import finder function and global variable
 # rockstar uses to store the results
 
 cdef import from "groupies.h":
-	void find_subs(fof *f) 
-	halo *halos
-	np.int64_t num_halos
-	void calc_mass_definition()
+    void find_subs(fof *f) nogil
+    halo *halos
+    np.int64_t num_halos
+    void calc_mass_definition() nogil
 
 # For outputing halos, rockstar style
 
 cdef import from "meta_io.h":
-	void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) 
+    void output_halos(np.int64_t id_offset, np.int64_t snap, np.int64_t chunk, float *bounds) nogil
 
 # For setting up the configuration of rockstar
 
 cdef import from "config.h":
-	void setup_config()
+    void setup_config() nogil
 
 cdef import from "config_vars.h":
-	# Rockstar cleverly puts all of the config variables inside a templated
-	# definition of their vaiables.
-	char *FILE_FORMAT
-	np.float64_t PARTICLE_MASS
+    # Rockstar cleverly puts all of the config variables inside a templated
+    # definition of their variables.
+    char *FILE_FORMAT
+    np.float64_t PARTICLE_MASS
 
-	char *MASS_DEFINITION
-	np.int64_t MIN_HALO_OUTPUT_SIZE
-	np.float64_t FORCE_RES
+    char *MASS_DEFINITION
+    np.int64_t MIN_HALO_OUTPUT_SIZE
+    np.float64_t FORCE_RES
 
-	np.float64_t SCALE_NOW
-	np.float64_t h0
-	np.float64_t Ol
-	np.float64_t Om
+    np.float64_t SCALE_NOW
+    np.float64_t h0
+    np.float64_t Ol
+    np.float64_t Om
 
-	np.int64_t GADGET_ID_BYTES
-	np.float64_t GADGET_MASS_CONVERSION
-	np.float64_t GADGET_LENGTH_CONVERSION
-	np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
-	np.int64_t RESCALE_PARTICLE_MASS
+    np.int64_t GADGET_ID_BYTES
+    np.float64_t GADGET_MASS_CONVERSION
+    np.float64_t GADGET_LENGTH_CONVERSION
+    np.int64_t GADGET_SKIP_NON_HALO_PARTICLES
+    np.int64_t RESCALE_PARTICLE_MASS
 
-	np.int64_t PARALLEL_IO
-	char *PARALLEL_IO_SERVER_ADDRESS
-	char *PARALLEL_IO_SERVER_PORT
-	np.int64_t PARALLEL_IO_WRITER_PORT
-	char *PARALLEL_IO_SERVER_INTERFACE
-	char *RUN_ON_SUCCESS
+    np.int64_t PARALLEL_IO
+    char *PARALLEL_IO_SERVER_ADDRESS
+    char *PARALLEL_IO_SERVER_PORT
+    np.int64_t PARALLEL_IO_WRITER_PORT
+    char *PARALLEL_IO_SERVER_INTERFACE
+    char *RUN_ON_SUCCESS
 
-	char *INBASE
-	char *FILENAME
-	np.int64_t STARTING_SNAP
-	np.int64_t NUM_SNAPS
-	np.int64_t NUM_BLOCKS
-	np.int64_t NUM_READERS
-	np.int64_t PRELOAD_PARTICLES
-	char *SNAPSHOT_NAMES
-	char *LIGHTCONE_ALT_SNAPS
-	char *BLOCK_NAMES
+    char *INBASE
+    char *FILENAME
+    np.int64_t STARTING_SNAP
+    np.int64_t NUM_SNAPS
+    np.int64_t NUM_BLOCKS
+    np.int64_t NUM_READERS
+    np.int64_t PRELOAD_PARTICLES
+    char *SNAPSHOT_NAMES
+    char *LIGHTCONE_ALT_SNAPS
+    char *BLOCK_NAMES
 
-	char *OUTBASE
-	np.float64_t OVERLAP_LENGTH
-	np.int64_t NUM_WRITERS
-	np.int64_t FORK_READERS_FROM_WRITERS
-	np.int64_t FORK_PROCESSORS_PER_MACHINE
+    char *OUTBASE
+    np.float64_t OVERLAP_LENGTH
+    np.int64_t NUM_WRITERS
+    np.int64_t FORK_READERS_FROM_WRITERS
+    np.int64_t FORK_PROCESSORS_PER_MACHINE
 
-	char *OUTPUT_FORMAT
-	np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
-	np.int64_t FULL_PARTICLE_CHUNKS
-	char *BGC2_SNAPNAMES
+    char *OUTPUT_FORMAT
+    np.int64_t DELETE_BINARY_OUTPUT_AFTER_FINISHED
+    np.int64_t FULL_PARTICLE_CHUNKS
+    char *BGC2_SNAPNAMES
 
-	np.int64_t BOUND_PROPS
-	np.int64_t BOUND_OUT_TO_HALO_EDGE
-	np.int64_t DO_MERGER_TREE_ONLY
-	np.int64_t IGNORE_PARTICLE_IDS
-	np.float64_t TRIM_OVERLAP
-	np.float64_t ROUND_AFTER_TRIM
-	np.int64_t LIGHTCONE
-	np.int64_t PERIODIC
+    np.int64_t BOUND_PROPS
+    np.int64_t BOUND_OUT_TO_HALO_EDGE
+    np.int64_t DO_MERGER_TREE_ONLY
+    np.int64_t IGNORE_PARTICLE_IDS
+    np.float64_t TRIM_OVERLAP
+    np.float64_t ROUND_AFTER_TRIM
+    np.int64_t LIGHTCONE
+    np.int64_t PERIODIC
 
-	np.float64_t LIGHTCONE_ORIGIN[3]
-	np.float64_t LIGHTCONE_ALT_ORIGIN[3]
+    np.float64_t LIGHTCONE_ORIGIN[3]
+    np.float64_t LIGHTCONE_ALT_ORIGIN[3]
 
-	np.float64_t LIMIT_CENTER[3]
-	np.float64_t LIMIT_RADIUS
+    np.float64_t LIMIT_CENTER[3]
+    np.float64_t LIMIT_RADIUS
 
-	np.int64_t SWAP_ENDIANNESS
-	np.int64_t GADGET_VARIANT
+    np.int64_t SWAP_ENDIANNESS
+    np.int64_t GADGET_VARIANT
 
-	np.float64_t FOF_FRACTION
-	np.float64_t FOF_LINKING_LENGTH
-	np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
-	np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
-	np.int64_t TEMPORAL_HALO_FINDING
-	np.int64_t MIN_HALO_PARTICLES
-	np.float64_t UNBOUND_THRESHOLD
-	np.int64_t ALT_NFW_METRIC
+    np.float64_t FOF_FRACTION
+    np.float64_t FOF_LINKING_LENGTH
+    np.float64_t INCLUDE_HOST_POTENTIAL_RATIO
+    np.float64_t DOUBLE_COUNT_SUBHALO_MASS_RATIO
+    np.int64_t TEMPORAL_HALO_FINDING
+    np.int64_t MIN_HALO_PARTICLES
+    np.float64_t UNBOUND_THRESHOLD
+    np.int64_t ALT_NFW_METRIC
 
-	np.int64_t TOTAL_PARTICLES
-	np.float64_t BOX_SIZE
-	np.int64_t OUTPUT_HMAD
-	np.int64_t OUTPUT_PARTICLES
-	np.int64_t OUTPUT_LEVELS
-	np.float64_t DUMP_PARTICLES[3]
+    np.int64_t TOTAL_PARTICLES
+    np.float64_t BOX_SIZE
+    np.int64_t OUTPUT_HMAD
+    np.int64_t OUTPUT_PARTICLES
+    np.int64_t OUTPUT_LEVELS
+    np.float64_t DUMP_PARTICLES[3]
 
-	np.float64_t AVG_PARTICLE_SPACING
-	np.int64_t SINGLE_SNAP
+    np.float64_t AVG_PARTICLE_SPACING
+    np.int64_t SINGLE_SNAP
 
 
 
 cdef class RockstarGroupiesInterface:
-	
-	cdef public object pf
-	cdef public object fof
+    
+    cdef public object pf
+    cdef public object fof
 
-	# For future use/consistency
-	def __cinit__(self,pf):
-		self.pf = pf
+    # For future use/consistency
+    def __cinit__(self,pf):
+        self.pf = pf
 
-	def setup_rockstar(self,
-						particle_mass,
-						int periodic = 1, force_res=None,
-						int min_halo_size = 25, outbase = "None",
-						callbacks = None):
-		global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
-		global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
-		global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
-		global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
-		global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
-		
+    def setup_rockstar(self,
+                        particle_mass,
+                        int periodic = 1, force_res=None,
+                        int min_halo_size = 25, outbase = "None",
+                        callbacks = None):
+        global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
+        global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
+        global FORK_READERS_FROM_WRITERS, PARALLEL_IO_WRITER_PORT, NUM_WRITERS
+        global rh, SCALE_NOW, OUTBASE, MIN_HALO_OUTPUT_SIZE
+        global OVERLAP_LENGTH, TOTAL_PARTICLES, FORCE_RES
+        
 
-		if force_res is not None:
-			FORCE_RES=np.float64(force_res)
+        if force_res is not None:
+            FORCE_RES=np.float64(force_res)
 
-		OVERLAP_LENGTH = 0.0
-		
-		FILENAME = "inline.<block>"
-		FILE_FORMAT = "GENERIC"
-		OUTPUT_FORMAT = "ASCII"
-		MIN_HALO_OUTPUT_SIZE=min_halo_size
-		
-		pf = self.pf
+        OVERLAP_LENGTH = 0.0
+        
+        FILENAME = "inline.<block>"
+        FILE_FORMAT = "GENERIC"
+        OUTPUT_FORMAT = "ASCII"
+        MIN_HALO_OUTPUT_SIZE=min_halo_size
+        
+        pf = self.pf
 
-		h0 = pf.hubble_constant
-		Ol = pf.omega_lambda
-		Om = pf.omega_matter
-		
-		SCALE_NOW = 1.0/(pf.current_redshift+1.0)
-		
-		if not outbase =='None'.decode('UTF-8'):
-			#output directory. since we can't change the output filenames
-			#workaround is to make a new directory
-			OUTBASE = outbase 
+        h0 = pf.hubble_constant
+        Ol = pf.omega_lambda
+        Om = pf.omega_matter
+        
+        SCALE_NOW = 1.0/(pf.current_redshift+1.0)
+        
+        if not outbase =='None'.decode('UTF-8'):
+            #output directory. Since we can't change the output filenames,
+            #the workaround is to make a new directory
+            OUTBASE = outbase 
 
 
-		PARTICLE_MASS = particle_mass.in_units('Msun/h')
-		PERIODIC = periodic
-		BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
+        PARTICLE_MASS = particle_mass.in_units('Msun/h')
+        PERIODIC = periodic
+        BOX_SIZE = pf.domain_width.in_units('Mpccm/h')[0]
 
-		# Set up the configuration options
-		setup_config()
+        # Set up the configuration options
+        setup_config()
 
-		# Needs to be called so rockstar can use the particle mass parameter
-		# to calculate virial quantities properly
-		calc_mass_definition()
+        # Needs to be called so rockstar can use the particle mass parameter
+        # to calculate virial quantities properly
+        calc_mass_definition()
 
+    def output_halos(self):
+        output_halos(0, 0, 0, NULL) 
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    def make_rockstar_fof(self, np.ndarray[np.int64_t, ndim=1] pid,
+                                np.ndarray[np.float64_t, ndim=2] pos,
+                                np.ndarray[np.float64_t, ndim=2] vel,
+                                np.ndarray[np.int64_t, ndim=1] fof_tags,
+                                np.int64_t nfof,
+                                np.int64_t npart_max):
 
-	def make_rockstar_fof(self,fof_ids, pos, vel):
+        # Define fof object
 
-		# Turn positions and velocities into units we want
-		pos = pos.in_units('Mpccm/h')
-		vel = vel.in_units('km/s')
+        # Find number of particles
+        cdef np.int64_t i, j
+        cdef np.int64_t num_particles = pid.shape[0]
 
-		# Define fof object
-		cdef fof fof_obj
+        # Allocate space for correct number of particles
+        cdef particle* particles = <particle*> malloc(npart_max * sizeof(particle))
+        cdef fof fof_obj
+        fof_obj.particles = particles
 
-		# Find number of particles
-		cdef np.int64_t num_particles = len(fof_ids)
+        cdef np.int64_t last_fof_tag = 1
+        cdef np.int64_t k = 0
+        for i in range(num_particles):
+            if fof_tags[i] == 0:
+                continue
+            if fof_tags[i] != last_fof_tag:
+                last_fof_tag = fof_tags[i]
+                if k > 16:
+                    print "Finding subs", k, i
+                    fof_obj.num_p = k
+                    find_subs(&fof_obj)
+                k = 0
+            particles[k].id = pid[i]
 
-		# Allocate space for correct number of particles
-		cdef particle* particles = <particle*> malloc(num_particles * sizeof(particle))
+            # fill in locations & velocities
+            for j in range(3):
+                particles[k].pos[j] = pos[i,j]
+                particles[k].pos[j+3] = vel[i,j]
+            k += 1
+        free(particles)
 
-		# Fill in array of particles with particle that fof identified
-		# This is possibly the slowest way to code this, but for now
-		# I just want it to work
-		for i,id in enumerate(fof_ids):
-			particles[i].id = id
 
-			# fill in locations & velocities
-			for j in range(3):
-				particles[i].pos[j] = pos[id][j]
-				particles[i].pos[j+3] = vel[id][j]
 
-
-		# Assign pointer to particles into FOF object 
-		fof_obj.particles = particles
-
-		# Assign number of particles into FOF object
-		fof_obj.num_p = num_particles
-
-		# Make pointer to fof object
-		cdef fof* fof_pointer = & fof_obj
-
-		# Find the sub halos using rockstar by passing a pointer to the fof object
-		find_subs( fof_pointer)
-
-		# Output the halos, rockstar style
-		output_halos(0, 0, 0, NULL) 
-
-		free(particles)
-
-
-

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/halo_finding/rockstar/setup.py
--- a/yt/analysis_modules/halo_finding/rockstar/setup.py
+++ b/yt/analysis_modules/halo_finding/rockstar/setup.py
@@ -24,5 +24,12 @@
                          include_dirs=[rd,
                                        os.path.join(rd, "io"),
                                        os.path.join(rd, "util")])
+    config.add_extension("rockstar_groupies",
+                         "yt/analysis_modules/halo_finding/rockstar/rockstar_groupies.pyx",
+                         library_dirs=[rd],
+                         libraries=["rockstar"],
+                         include_dirs=[rd,
+                                       os.path.join(rd, "io"),
+                                       os.path.join(rd, "util")])
     return config
 

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/halo_finding/setup.py
--- a/yt/analysis_modules/halo_finding/setup.py
+++ b/yt/analysis_modules/halo_finding/setup.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python
-import setuptools
-import os
-import sys
 import os.path
 
+
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('halo_finding', parent_package, top_path)
@@ -12,6 +10,5 @@
     config.add_subpackage("parallel_hop")
     if os.path.exists("rockstar.cfg"):
         config.add_subpackage("rockstar")
-    config.make_config_py() # installs __config__.py
-    #config.make_svn_version_py()
+    config.make_config_py()  # installs __config__.py
     return config

diff -r 5c14a1aba1dae8a0c921a6fe0df2faa554c31168 -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 yt/analysis_modules/hierarchy_subset/api.py
--- a/yt/analysis_modules/hierarchy_subset/api.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-API for hierarchy_subset
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .hierarchy_subset import \
-    ConstructedRootGrid, \
-    AMRExtractedGridProxy, \
-    ExtractedHierarchy, \
-    ExtractedParameterFile

This diff is so big that we needed to truncate the remainder.

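The rewritten make_rockstar_fof above streams tag-sorted particles and
flushes each FOF group to find_subs when the tag changes, rather than
building one Python list per group. A simplified pure-Python sketch of that
grouping pattern (the real code also skips tag 0, enforces a minimum group
size, and fills a preallocated C particle array):

    import numpy as np

    def for_each_group(tags, flush):
        # Buffer indices of the current group; flush when the tag changes.
        start = 0
        for i in range(1, len(tags) + 1):
            if i == len(tags) or tags[i] != tags[start]:
                flush(tags[start], np.arange(start, i))
                start = i

    def report(tag, members):
        print("group %d has %d particles" % (tag, len(members)))

    for_each_group(np.array([1, 1, 1, 4, 4, 7]), report)
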
https://bitbucket.org/yt_analysis/yt/commits/f1032b474045/
Changeset:   f1032b474045
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-17 15:06:08
Summary:     Removing manual species field by density call for Enzo.  Updating tests.
Affected #:  2 files

diff -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -124,7 +124,6 @@
         if species != "Electron":
             self.alias(("gas", "%s_density" % yt_name),
                        ("enzo", "%s_Density" % species))
-        add_species_field_by_density(self, "gas", yt_name)
 
     def setup_species_fields(self):
         species_names = [fn.rsplit("_Density")[0] for ft, fn in 

diff -r 7e29fc83eb6185d285f86486635fd3bcc4a0ef64 -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd yt/frontends/enzo/tests/test_outputs.py
--- a/yt/frontends/enzo/tests/test_outputs.py
+++ b/yt/frontends/enzo/tests/test_outputs.py
@@ -24,6 +24,32 @@
 _fields = ("temperature", "density", "velocity_magnitude",
            "velocity_divergence")
 
+def check_color_conservation(pf):
+    species_names = pf.field_info.species_names
+    dd = pf.all_data()
+    dens_yt = dd["density"].copy()
+    # Enumerate our species here
+    for s in sorted(species_names):
+        if s == "El": continue
+        dens_yt -= dd["%s_density" % s]
+    dens_yt -= dd["metal_density"]
+    delta_yt = np.abs(dens_yt / dd["density"])
+
+    # Now we compare color conservation to Enzo's color conservation
+    dd = pf.all_data()
+    dens_enzo = dd["Density"].copy()
+    for f in sorted(pf.field_list):
+        if not f[1].endswith("_Density") or \
+               f[1].startswith("Dark_Matter_")  or \
+               f[1].startswith("Electron_") or \
+               f[1].startswith("SFR_") or \
+               f[1].startswith("Forming_Stellar_") or \
+               f[1].startswith("Star_Particle_"):
+            continue
+        dens_enzo -= dd[f]
+    delta_enzo = np.abs(dens_enzo / dd["Density"])
+    return assert_almost_equal, delta_yt, delta_enzo
+
 m7 = "DD0010/moving7_0010"
 @requires_pf(m7)
 def test_moving7():
@@ -37,7 +63,15 @@
 @requires_pf(g30, big_data=True)
 def test_galaxy0030():
     pf = data_dir_load(g30)
+    yield check_color_conservation(pf)
     yield assert_equal, str(pf), "galaxy0030"
     for test in big_patch_amr(g30, _fields):
         test_galaxy0030.__name__ = test.description
         yield test
+
+ecp = "enzo_cosmology_plus/DD0046/DD0046"
+@requires_pf(ecp, big_data=True)
+def test_ecp():
+    pf = data_dir_load(ecp)
+    # Now we test our species fields
+    yield check_color_conservation(pf)

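check_color_conservation encodes a simple invariant: the per-species
densities (plus metals) should sum back to the total gas density, in both
the yt and the native Enzo field views. A toy numpy version of the residual
it compares, with made-up mass fractions:

    import numpy as np

    density = np.array([1.0, 2.0, 4.0])        # total gas density
    species = {"HI": 0.70 * density,           # made-up mass fractions
               "HeI": 0.28 * density}
    metal_density = 0.02 * density
    residual = density - sum(species.values()) - metal_density
    delta = np.abs(residual / density)
    assert delta.max() < 1e-12                 # "color" is conserved
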

https://bitbucket.org/yt_analysis/yt/commits/ef5172e8e55d/
Changeset:   ef5172e8e55d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2014-04-20 02:06:38
Summary:     Merging to tip.
Affected #:  21 files

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 doc/source/analyzing/analysis_modules/halo_analysis.rst
--- a/doc/source/analyzing/analysis_modules/halo_analysis.rst
+++ b/doc/source/analyzing/analysis_modules/halo_analysis.rst
@@ -6,6 +6,7 @@
 
 .. toctree::
    :maxdepth: 1
+
    halo_catalogs
    halo_finding
    halo_mass_function

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -154,6 +154,9 @@
 from yt.convenience import \
     load, simulation
 
+from yt.testing import \
+    run_nose
+
 # Import some helpful math utilities
 from yt.utilities.math_utils import \
     ortho_find, quartiles, periodic_position

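With run_nose re-exported at the top level, the test suite can presumably be
invoked directly from an interpreter (any keyword arguments are left to
yt.testing):

    import yt
    yt.run_nose()
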
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/api.py
--- a/yt/analysis_modules/halo_analysis/api.py
+++ b/yt/analysis_modules/halo_analysis/api.py
@@ -20,6 +20,9 @@
 from .halo_callbacks import \
      add_callback
 
+from .halo_finding_methods import \
+     add_finding_method
+
 from .halo_filters import \
      add_filter
      

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/finding_methods.py
--- a/yt/analysis_modules/halo_analysis/finding_methods.py
+++ /dev/null
@@ -1,20 +0,0 @@
-"""
-Halo Finding methods
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .operator_registry import \
-    hf_registry
-
-class HaloFindingMethod(object):
-    pass

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/halo_catalog.py
--- a/yt/analysis_modules/halo_analysis/halo_catalog.py
+++ b/yt/analysis_modules/halo_analysis/halo_catalog.py
@@ -30,20 +30,9 @@
 from .operator_registry import \
      callback_registry, \
      filter_registry, \
-     hf_registry, \
+     finding_method_registry, \
      quantity_registry
 
-from yt.analysis_modules.halo_finding.halo_objects import \
-    FOFHaloFinder, HOPHaloFinder
-from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
-    HaloCatalogDataset
-from yt.frontends.stream.data_structures import \
-    load_particles
-from yt.frontends.halo_catalogs.rockstar.data_structures import \
-    RockstarDataset
-from yt.analysis_modules.halo_finding.rockstar.api import \
-    RockstarHaloFinder
-
 class HaloCatalog(ParallelAnalysisInterface):
     r"""Create a HaloCatalog: an object that allows for the creation and association
     of data with a set of halo objects.
@@ -103,7 +92,7 @@
 
     See Also
     --------
-    add_callback, add_filter, add_quantity
+    add_callback, add_filter, add_finding_method, add_quantity
     
     """
     
@@ -113,7 +102,6 @@
         ParallelAnalysisInterface.__init__(self)
         self.halos_pf = halos_pf
         self.data_pf = data_pf
-        self.finder_method = finder_method
         self.output_dir = ensure_dir(output_dir)
         if os.path.basename(self.output_dir) != ".":
             self.output_prefix = os.path.basename(self.output_dir)
@@ -133,6 +121,10 @@
                 data_source = data_pf.h.all_data()
         self.data_source = data_source
 
+        if finder_method is not None:
+            finder_method = finding_method_registry.find(finder_method)
+        self.finder_method = finder_method            
+        
         # all of the analysis actions to be performed: callbacks, filters, and quantities
         self.actions = []
         # fields to be written to the halo catalog
@@ -358,16 +350,14 @@
 
         if self.halos_pf is None:
             # Find the halos and make a dataset of them
-            particles_pf = self.find_halos()
+            self.halos_pf = self.finder_method(self.data_pf)
 
             # Assign pf and data sources appropriately
-            self.halos_pf = particles_pf
-            self.data_source = particles_pf.all_data()
+            self.data_source = self.halos_pf.all_data()
 
             # Add all of the default quantities that all halos must have
             self.add_default_quantities('all')
 
-
         my_index = np.argsort(self.data_source["particle_identifier"])
         for i in parallel_objects(my_index, njobs=njobs, dynamic=dynamic):
             new_halo = Halo(self)
@@ -400,80 +390,6 @@
         if save_catalog:
             self.save_catalog()
 
-    def find_halos(self):
-
-        finder_method = (self.finder_method).lower()
-
-        if finder_method == "hop":
-            halo_list = HOPHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-
-        elif finder_method == "fof":
-            halo_list = FOFHaloFinder(self.data_pf)
-            halos_pf = self._parse_old_halo_list(halo_list)
-            
-        elif finder_method == 'rockstar':
-            rh = RockstarHaloFinder(self.data_pf, 
-                outbase='{0}/rockstar_halos'.format(self.output_prefix))
-            rh.run()
-            halos_pf = RockstarDataset('{0}/rockstar_halos/halos_0.0.bin'.format(self.output_prefix))
-            halos_pf.create_field_info()
-        else:
-            raise RuntimeError("finder_method must be 'fof', 'hop', or 'rockstar'")
-
-        for attr in ["current_redshift", "current_time",
-                     "domain_dimensions",
-                     "cosmological_simulation", "omega_lambda",
-                     "omega_matter", "hubble_constant"]:
-            attr_val = getattr(self.data_pf, attr)
-            setattr(halos_pf, attr, attr_val)
-        halos_pf.current_time = halos_pf.current_time.in_cgs()
-
-        return halos_pf
-
-    def _parse_old_halo_list(self, halo_list):
-
-
-        data_pf = self.data_pf
-        num_halos = len(halo_list)
-
-        # Set up fields that we want to pull from identified halos and their units
-        new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
-            'particle_position_y','particle_position_z',
-            'virial_radius']
-        new_units = [ '', 'g', 'cm', 'cm','cm','cm']
-
-        # Set up a dictionary based on those fields 
-        # with empty arrays where we will fill in their values
-        halo_properties = { f : (np.zeros(num_halos),unit) \
-            for f, unit in zip(new_fields,new_units)}
-
-        # Iterate through the halos pulling out their positions and virial quantities
-        # and filling in the properties dictionary
-        for i,halo in enumerate(halo_list):
-            halo_properties['particle_identifier'][0][i] = i
-            halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
-            halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
-
-            com = halo.center_of_mass().in_cgs()
-            halo_properties['particle_position_x'][0][i] = com[0]
-            halo_properties['particle_position_y'][0][i] = com[1]
-            halo_properties['particle_position_z'][0][i] = com[2]
-
-        # Define a bounding box based on original data pf
-        bbox = np.array([data_pf.domain_left_edge.in_cgs(),
-                data_pf.domain_right_edge.in_cgs()]).T
-
-        # Create a pf with the halos as particles
-        particle_pf = load_particles(halo_properties, 
-                bbox=bbox, length_unit = 1, mass_unit=1)
-
-        # Create the field info dictionary so we can reference those fields
-        particle_pf.create_field_info()
-
-        return particle_pf
-
-
     def save_catalog(self):
         "Write out hdf5 file with all halo quantities."
 
@@ -513,4 +429,3 @@
         self.add_quantity("particle_position_z", field_type=field_type)
         self.add_quantity("virial_radius", field_type=field_type)
 
-

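With finder_method now resolved through the registry at construction time
rather than a hand-rolled if/elif chain, end-user code keeps the same shape.
A minimal sketch against a placeholder dataset:

    import yt
    from yt.analysis_modules.halo_analysis.api import HaloCatalog

    data_pf = yt.load("DD0010/moving7_0010")   # placeholder path
    hc = HaloCatalog(data_pf=data_pf, finder_method="hop")
    hc.create()
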
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/halo_filters.py
--- a/yt/analysis_modules/halo_analysis/halo_filters.py
+++ b/yt/analysis_modules/halo_analysis/halo_filters.py
@@ -13,6 +13,10 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+
+from yt.utilities.spatial import KDTree
+
 from .halo_callbacks import HaloCallback
 from .operator_registry import filter_registry
 
@@ -58,3 +62,44 @@
     return eval("%s %s %s" % (h_value, operator, value))
 
 add_filter("quantity_value", quantity_value)
+
+def _not_subhalo(halo, field_type="halos"):
+    """
+    Only return true if this halo is not a subhalo.
+    
+    This is used for halo finders such as Rockstar that output parent
+    and subhalos together.
+    """
+
+    if not hasattr(halo.halo_catalog, "parent_dict"):
+        halo.halo_catalog.parent_dict = \
+          create_parent_dict(halo.halo_catalog.data_source, ptype=field_type)
+    return halo.halo_catalog.parent_dict[int(halo.quantities["particle_identifier"])] == -1
+add_filter("not_subhalo", _not_subhalo)
+
+def create_parent_dict(data_source, ptype="halos"):
+    """
+    Create a dictionary of halo parents to allow for filtering of subhalos.
+
+    For a pair of halos whose distance is smaller than the radius of at least 
+    one of the halos, the parent is defined as the halo with the larger radius.
+    Parent halos (halos with no parents of their own) have parent index values of -1.
+    """
+    pos = np.rollaxis(
+        np.array([data_source[ptype, "particle_position_x"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_y"].in_units("Mpc"),
+                  data_source[ptype, "particle_position_z"].in_units("Mpc")]), 1)
+    rad = data_source[ptype, "virial_radius"].in_units("Mpc").to_ndarray()
+    ids = data_source[ptype, "particle_identifier"].to_ndarray().astype("int")
+    parents = -1 * np.ones_like(ids, dtype="int")
+    my_tree = KDTree(pos)
+
+    for i in xrange(ids.size):
+        neighbors = np.array(
+            my_tree.query_ball_point(pos[i], rad[i], p=2))
+        if neighbors.size > 1:
+            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
+
+    parents[ids == parents] = -1
+    parent_dict = dict(zip(ids, parents))
+    return parent_dict

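create_parent_dict assigns each halo the largest overlapping halo as its
parent. A toy version of the same loop, using scipy's cKDTree as a stand-in
for yt.utilities.spatial.KDTree:

    import numpy as np
    from scipy.spatial import cKDTree   # stand-in for yt's KDTree

    pos = np.array([[0.0, 0.0, 0.0],    # big halo
                    [0.1, 0.0, 0.0],    # small halo inside it
                    [5.0, 0.0, 0.0]])   # isolated halo
    rad = np.array([1.0, 0.2, 0.5])
    ids = np.arange(3)
    parents = -1 * np.ones_like(ids)
    tree = cKDTree(pos)
    for i in range(ids.size):
        neighbors = np.array(tree.query_ball_point(pos[i], rad[i], p=2))
        if neighbors.size > 1:
            # the largest halo in the overlap wins
            parents[neighbors] = ids[neighbors[np.argmax(rad[neighbors])]]
    parents[ids == parents] = -1        # top-level halos point to -1
    print(dict(zip(ids, parents)))      # {0: -1, 1: 0, 2: -1}
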
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/halo_finding_methods.py
--- /dev/null
+++ b/yt/analysis_modules/halo_analysis/halo_finding_methods.py
@@ -0,0 +1,132 @@
+"""
+Halo Finding methods
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.analysis_modules.halo_finding.halo_objects import \
+    FOFHaloFinder, HOPHaloFinder
+from yt.frontends.halo_catalogs.halo_catalog.data_structures import \
+    HaloCatalogDataset
+from yt.frontends.stream.data_structures import \
+    load_particles
+
+from .operator_registry import \
+    finding_method_registry
+
+def add_finding_method(name, function):
+    finding_method_registry[name] = HaloFindingMethod(function)
+    
+class HaloFindingMethod(object):
+    r"""
+    A halo finding method is a callback that performs halo finding on a 
+    dataset and returns a new dataset that is the loaded halo finder output.
+    """
+    def __init__(self, function, args=None, kwargs=None):
+        self.function = function
+        self.args = args
+        if self.args is None: self.args = []
+        self.kwargs = kwargs
+        if self.kwargs is None: self.kwargs = {}
+
+    def __call__(self, ds):
+        return self.function(ds, *self.args, **self.kwargs)
+
+def _hop_method(pf):
+    r"""
+    Run the Hop halo finding method.
+    """
+    
+    halo_list = HOPHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("hop", _hop_method)
+
+def _fof_method(pf):
+    r"""
+    Run the FoF halo finding method.
+    """
+
+    halo_list = FOFHaloFinder(pf)
+    halos_pf = _parse_old_halo_list(pf, halo_list)
+    return halos_pf
+add_finding_method("fof", _fof_method)
+
+def _rockstar_method(pf):
+    r"""
+    Run the Rockstar halo finding method.
+    """
+
+    from yt.frontends.halo_catalogs.rockstar.data_structures import \
+     RockstarDataset
+    from yt.analysis_modules.halo_finding.rockstar.api import \
+     RockstarHaloFinder
+    
+    rh = RockstarHaloFinder(pf)
+    rh.run()
+    halos_pf = RockstarDataset("rockstar_halos/halos_0.0.bin")
+    halos_pf.create_field_info()
+    return halos_pf
+add_finding_method("rockstar", _rockstar_method)
+
+def _parse_old_halo_list(data_pf, halo_list):
+    r"""
+    Convert the halo list into a loaded dataset.
+    """
+
+    num_halos = len(halo_list)
+
+    # Set up fields that we want to pull from identified halos and their units
+    new_fields = ['particle_identifier', 'particle_mass', 'particle_position_x', 
+        'particle_position_y','particle_position_z',
+        'virial_radius']
+    new_units = [ '', 'g', 'cm', 'cm','cm','cm']
+
+    # Set up a dictionary based on those fields 
+    # with empty arrays where we will fill in their values
+    halo_properties = { f : (np.zeros(num_halos),unit) \
+        for f, unit in zip(new_fields,new_units)}
+
+    # Iterate through the halos pulling out their positions and virial quantities
+    # and filling in the properties dictionary
+    for i,halo in enumerate(halo_list):
+        halo_properties['particle_identifier'][0][i] = i
+        halo_properties['particle_mass'][0][i] = halo.virial_mass().in_cgs()
+        halo_properties['virial_radius'][0][i] = halo.virial_radius().in_cgs()
+
+        com = halo.center_of_mass().in_cgs()
+        halo_properties['particle_position_x'][0][i] = com[0]
+        halo_properties['particle_position_y'][0][i] = com[1]
+        halo_properties['particle_position_z'][0][i] = com[2]
+
+    # Define a bounding box based on original data pf
+    bbox = np.array([data_pf.domain_left_edge.in_cgs(),
+            data_pf.domain_right_edge.in_cgs()]).T
+
+    # Create a pf with the halos as particles
+    particle_pf = load_particles(halo_properties, 
+            bbox=bbox, length_unit = 1, mass_unit=1)
+
+    # Create the field info dictionary so we can reference those fields
+    particle_pf.create_field_info()
+
+    for attr in ["current_redshift", "current_time",
+                 "domain_dimensions",
+                 "cosmological_simulation", "omega_lambda",
+                 "omega_matter", "hubble_constant"]:
+        attr_val = getattr(data_pf, attr)
+        setattr(particle_pf, attr, attr_val)
+    particle_pf.current_time = particle_pf.current_time.in_cgs()
+    
+    return particle_pf

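Because a finding method is just a callable from dataset to loaded halo
dataset, third-party finders can be registered the same way. A hedged sketch
that reuses the module's converter (the link keyword is assumed from
FOFHaloFinder's existing signature):

    from yt.analysis_modules.halo_analysis.halo_finding_methods import \
        add_finding_method, _parse_old_halo_list
    from yt.analysis_modules.halo_finding.halo_objects import FOFHaloFinder

    def _loose_fof_method(pf):
        # FoF with a looser linking length than the default
        halo_list = FOFHaloFinder(pf, link=0.4)
        return _parse_old_halo_list(pf, halo_list)

    add_finding_method("loose_fof", _loose_fof_method)
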
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_analysis/operator_registry.py
--- a/yt/analysis_modules/halo_analysis/operator_registry.py
+++ b/yt/analysis_modules/halo_analysis/operator_registry.py
@@ -27,5 +27,5 @@
 
 callback_registry = OperatorRegistry()
 filter_registry = OperatorRegistry()
-hf_registry = OperatorRegistry()
+finding_method_registry = OperatorRegistry()
 quantity_registry = OperatorRegistry()

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -110,7 +110,9 @@
         if self._name == "RockstarHalo":
             ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
         elif self._name == "LoadedHalo":
-            ds = self.pf.sphere(self.CoM, self._radjust * self.max_radius)
+            ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
         sp_pid = ds['particle_index']
         self._ds_sort = sp_pid.argsort()
         sp_pid = sp_pid[self._ds_sort]
@@ -217,7 +219,7 @@
         vx = (self["particle_velocity_x"] * pm).sum()
         vy = (self["particle_velocity_y"] * pm).sum()
         vz = (self["particle_velocity_z"] * pm).sum()
-        return np.array([vx, vy, vz]) / pm.sum()
+        return self.pf.arr([vx, vy, vz], vx.units) / pm.sum()
 
     def rms_velocity(self):
         r"""Returns the mass-weighted RMS velocity for the halo
@@ -331,9 +333,11 @@
         handle.create_group("/%s" % gn)
         for field in ["particle_position_%s" % ax for ax in 'xyz'] \
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
-                   + ["particle_index"] + ["particle_mass"].in_units('Msun'):
+                   + ["particle_index"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
-        if 'creation_time' in self.data.pf.field_list:
+	handle.create_dataset("/%s/particle_mass" % gn,
+		data=self["particle_mass"].in_units('Msun'))
+        if ('io','creation_time') in self.data.pf.field_list:
             handle.create_dataset("/%s/creation_time" % gn,
                 data=self['creation_time'])
         n = handle["/%s" % gn]
@@ -848,6 +852,7 @@
         self._saved_fields = {}
         self._ds_sort = None
         self._particle_mask = None
+	self._pid_sort = None
 
 
     def __getitem__(self, key):
@@ -865,14 +870,28 @@
             self.size, key)
         if field_data is not None:
             if key == 'particle_index':
-                field_data = field_data[field_data.argsort()]
+                #this is an index for turning data sorted by particle index 
+		#into the same order as the fields on disk
+		self._pid_sort = field_data.argsort().argsort()
+	    #convert to YTArray using the data from disk
+	    if key == 'particle_mass':
+		field_data = self.pf.arr(field_data, 'Msun')
+	    else:
+	        field_data = self.pf.arr(field_data, 
+		    self.pf._get_field_info('unknown',key).units)
             self._saved_fields[key] = field_data
             return self._saved_fields[key]
         # We won't store this field below in saved_fields because
         # that would mean keeping two copies of it, one in the yt
         # machinery and one here.
-        ds = self.pf.sphere(self.CoM, 1.05 * self.max_radius)
-        return np.take(ds[key][self._ds_sort], self.particle_mask)
+        ds = self.pf.sphere(self.CoM, np.maximum(self._radjust * \
+	    self.pf.quan(self.max_radius, 'code_length'), \
+	    self.pf.index.get_smallest_dx()))
+	# If particle_mask hasn't been called once then _ds_sort won't have
+	# the proper values set yet
+        if self._particle_mask is None:
+	    self.particle_mask
+        return ds[key][self._ds_sort][self.particle_mask][self._pid_sort]
 
     def _get_particle_data(self, halo, fnames, size, field):
         # Given a list of file names, a halo, its size, and the desired field,
@@ -1087,10 +1106,10 @@
         gc.collect()
 
     def _get_dm_indices(self):
-        if 'creation_time' in self._data_source.index.field_list:
+        if ('io','creation_time') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on creation time")
             return (self._data_source["creation_time"] <= 0)
-        elif 'particle_type' in self._data_source.index.field_list:
+        elif ('io','particle_type') in self._data_source.index.field_list:
             mylog.debug("Differentiating based on particle type")
             return (self._data_source["particle_type"] == 1)
         else:
@@ -2141,7 +2160,7 @@
         elif fancy_padding and self._distributed:
             LE_padding = np.empty(3, dtype='float64')
             RE_padding = np.empty(3, dtype='float64')
-            avg_spacing = (float(vol) / data.size) ** (1. / 3.)
+            avg_spacing = (vol / data.size) ** (1. / 3.)
             base_padding = (self.num_neighbors) ** (1. / 3.) * self.safety * \
                 avg_spacing
             for dim in xrange(3):
@@ -2388,7 +2407,7 @@
                 total_mass = \
                     self.comm.mpi_allreduce((self._data_source['all', "particle_mass"][select].in_units('Msun')).sum(dtype='float64'), op='sum')
             else:
-                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun'), op='sum')
+                total_mass = self.comm.mpi_allreduce(self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun'), op='sum')
         # MJT: Note that instead of this, if we are assuming that the particles
         # are all on different processors, we should instead construct an
         # object representing the entire domain and sum it "lazily" with
@@ -2412,7 +2431,7 @@
             sub_mass = self._data_source["particle_mass"][select].in_units('Msun').sum(dtype='float64')
         else:
             sub_mass = \
-                self._data_source.quantities["TotalQuantity"]("particle_mass")[0].in_units('Msun')
+                self._data_source.quantities["TotalQuantity"]("particle_mass").in_units('Msun')
         HOPHaloList.__init__(self, self._data_source,
             threshold * total_mass / sub_mass, dm_only)
         self._parse_halolist(total_mass / sub_mass)

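The _pid_sort bookkeeping in this hunk leans on a compact numpy idiom:
argsort applied twice yields the inverse permutation, here mapping
index-sorted data back to on-disk order. A quick check:

    import numpy as np

    pid = np.array([42, 7, 19])      # particle indices, on-disk order
    order = pid.argsort()            # on-disk order -> sorted-by-index
    inverse = order.argsort()        # sorted-by-index -> on-disk order
    assert (np.sort(pid)[inverse] == pid).all()
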
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
--- a/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
+++ b/yt/analysis_modules/halo_finding/parallel_hop/parallel_hop_interface.py
@@ -53,7 +53,7 @@
         self.zpos = particle_fields.pop("particle_position_z")
         self.real_size = len(self.xpos)
         self.index = particle_fields.pop("particle_index")
-        self.mass = particle_fields.pop("ParticleMassMsun")
+        self.mass = particle_fields.pop("particle_mass")
         self.padded_particles = []
         self.nMerge = 4
         self.tree = tree

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -478,9 +478,14 @@
         return tuple(self.ActiveDimensions.tolist())
 
     def _setup_data_source(self):
-        self._data_source = self.pf.region(self.center,
-            self.left_edge - self.base_dds,
-            self.right_edge + self.base_dds)
+        LE = self.left_edge - self.base_dds
+        RE = self.right_edge + self.base_dds
+        if not all(self.pf.periodicity):
+            for i in range(3):
+                if self.pf.periodicity[i]: continue
+                LE[i] = max(LE[i], self.pf.domain_left_edge[i])
+                RE[i] = min(RE[i], self.pf.domain_right_edge[i])
+        self._data_source = self.pf.region(self.center, LE, RE)
         self._data_source.min_level = 0
         self._data_source.max_level = self.level
         self._pdata_source = self.pf.region(self.center,

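The covering-grid fix pads the backing region by one base cell but clamps
the pad at the domain edge along non-periodic axes. The clamping rule in
isolation:

    import numpy as np

    def clamp_edges(LE, RE, dom_LE, dom_RE, periodicity):
        # Shrink the padded region back inside the domain where the
        # dimension is not periodic; leave periodic dimensions alone.
        LE, RE = LE.copy(), RE.copy()
        for i in range(3):
            if periodicity[i]:
                continue
            LE[i] = max(LE[i], dom_LE[i])
            RE[i] = min(RE[i], dom_RE[i])
        return LE, RE

    LE, RE = clamp_edges(np.array([-0.1, 0.2, 0.0]),
                         np.array([1.1, 0.8, 1.0]),
                         np.zeros(3), np.ones(3),
                         (False, True, True))
    print(LE, RE)   # [ 0.   0.2  0. ] [ 1.   0.8  1. ]
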
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -624,7 +624,7 @@
         self.unit_registry.modify("code_length", self.length_unit)
         self.unit_registry.modify("code_mass", self.mass_unit)
         self.unit_registry.modify("code_time", self.time_unit)
-        vel_unit = getattr(self, "code_velocity",
+        vel_unit = getattr(self, "velocity_unit",
                     self.length_unit / self.time_unit)
         self.unit_registry.modify("code_velocity", vel_unit)
         # domain_width does not yet exist

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/_skeleton/api.py
--- a/yt/frontends/_skeleton/api.py
+++ b/yt/frontends/_skeleton/api.py
@@ -20,7 +20,7 @@
 
 from .fields import \
       SkeletonFieldInfo, \
-      add_flash_field
+      add_skeleton_field
 
 from .io import \
       IOHandlerSkeleton

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/_skeleton/data_structures.py
--- a/yt/frontends/_skeleton/data_structures.py
+++ b/yt/frontends/_skeleton/data_structures.py
@@ -13,36 +13,30 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
-import stat
 import numpy as np
-import weakref
 
-from yt.funcs import *
 from yt.data_objects.grid_patch import \
     AMRGridPatch
-from yt.data_objects.index import \
-    AMRHierarchy
+from yt.data_objects.grid_patch import \
+    AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+    GridIndex
 from yt.data_objects.static_output import \
     Dataset
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.io_handler import \
-    io_registry
-from yt.utilities.physical_constants import cm_per_mpc
-from .fields import SkeletonFieldInfo, add_flash_field, KnownSkeletonFields
-from yt.fields.field_info_container import \
-    FieldInfoContainer, NullFunc, ValidateDataField, TranslationFunc
+from yt.utilities.lib.misc_utilities import \
+    get_box_grids_level
 
 class SkeletonGrid(AMRGridPatch):
     _id_offset = 0
-    #__slots__ = ["_level_id", "stop_index"]
-    def __init__(self, id, index, level):
-        AMRGridPatch.__init__(self, id, filename = index.index_filename,
-                              index = index)
-        self.Parent = None
+    def __init__(self, id, index, level, start, dimensions):
+        AMRGridPatch.__init__(self, id, filename=index.index_filename,
+                              index=index)
+        self.Parent = []
         self.Children = []
         self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
 
     def __repr__(self):
         return "SkeletonGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
@@ -50,7 +44,6 @@
-class SkeletonHierarchy(AMRHierarchy):
+class SkeletonHierarchy(GridIndex):
 
     grid = SkeletonGrid
-    float_type = np.float64
     
     def __init__(self, pf, dataset_type='skeleton'):
         self.dataset_type = dataset_type
@@ -66,6 +59,10 @@
     def _detect_output_fields(self):
         # This needs to set a self.field_list that contains all the available,
         # on-disk fields.
+        # NOTE: Each should be a tuple, where the first element is the on-disk
+        # fluid type or particle type.  Convention suggests that the on-disk
+        # fluid type is usually the dataset_type and the on-disk particle type
+        # (for a single population of particles) is "io".
         pass
     
     def _count_grids(self):
@@ -96,30 +93,34 @@
 
 class SkeletonDataset(Dataset):
     _index_class = SkeletonHierarchy
-    _fieldinfo_fallback = SkeletonFieldInfo
-    _fieldinfo_known = KnownSkeletonFields
-    _handle = None
+    _field_info_class = SkeletonFieldInfo
     
-    def __init__(self, filename, dataset_type='skeleton',
-                 storage_filename = None,
-                 conversion_override = None):
-
-        if conversion_override is None: conversion_override = {}
-        self._conversion_override = conversion_override
-
+    def __init__(self, filename, dataset_type='skeleton',
+                 storage_filename=None):
+        self.fluid_types += ('skeleton',)
         Dataset.__init__(self, filename, dataset_type)
         self.storage_filename = storage_filename
 
-    def _set_units(self):
-        # This needs to set up the dictionaries that convert from code units to
-        # CGS.  The needed items are listed in the second entry:
-        #   self.time_units         <= sec_conversion
-        #   self.conversion_factors <= mpc_conversion
-        #   self.units              <= On-disk fields
+    def _set_code_unit_attributes(self):
+        # This is where quantities are created that represent the various
+        # on-disk units.  These are the currently available quantities which
+        # should be set, along with examples of how to set them to standard
+        # values.
+        #
+        # self.length_unit = self.quan(1.0, "cm")
+        # self.mass_unit = self.quan(1.0, "g")
+        # self.time_unit = self.quan(1.0, "s")
+        #
+        # These can also be set:
+        # self.velocity_unit = self.quan(1.0, "cm/s")
+        # self.magnetic_unit = self.quan(1.0, "gauss")
         pass
 
     def _parse_parameter_file(self):
-        # This needs to set up the following items:
+        # This needs to set up the following items.  Note that these are all
+        # assumed to be in code units; domain_left_edge and domain_right_edge
+        # will be converted to unit-bearing arrays (in code units) at a later
+        # time.  This includes the cosmological parameters.
         #
         #   self.unique_identifier
         #   self.parameters             <= full of code-specific items of use

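To make the updated skeleton concrete, a frontend derived from it might fill
in the unit stub roughly as follows (a sketch only; the subclass name and
every numeric value are invented, and a real frontend would read them from
its output files rather than hard-coding them):

    class MyCodeDataset(SkeletonDataset):
        def _set_code_unit_attributes(self):
            # Hypothetical values, hard-coded here only for illustration.
            self.length_unit = self.quan(3.0857e21, "cm")   # 1 kpc
            self.mass_unit = self.quan(1.989e33, "g")       # 1 Msun
            self.time_unit = self.quan(3.1557e13, "s")      # 1 Myr
            self.velocity_unit = self.quan(1.0e5, "cm/s")   # 1 km/s
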
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/_skeleton/definitions.py
--- a/yt/frontends/_skeleton/definitions.py
+++ b/yt/frontends/_skeleton/definitions.py
@@ -0,0 +1,1 @@
+# This file is often empty.  It can hold definitions related to a frontend.

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/_skeleton/fields.py
--- a/yt/frontends/_skeleton/fields.py
+++ b/yt/frontends/_skeleton/fields.py
@@ -13,79 +13,35 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
+import numpy as np
+from yt.funcs import mylog
 from yt.fields.field_info_container import \
-    FieldInfoContainer, \
-    NullFunc, \
-    TranslationFunc, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-from yt.utilities.physical_constants import \
-    kboltz
+    FieldInfoContainer
 
-# The first field container is where any fields that exist on disk go, along
-# with their conversion factors, display names, etc.
+# We need to specify which fields we might have in our dataset.  The field info
+# container subclass here will define which fields it knows about.  It also
+# provides optional methods, called during setup, that can be overridden.
 
-KnownSkeletonFields = FieldInfoContainer()
-add_skeleton_field = KnownSkeletonFields.add_field
+class SkeletonFieldInfo(FieldInfoContainer):
+    known_other_fields = (
+        # Each entry here is of the form
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-SkeletonFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = SkeletonFieldInfo.add_field
+    known_particle_fields = (
+        # Identical form to above
+        # ( "name", ("units", ["fields", "to", "alias"], # "display_name")),
+    )
 
-# Often, we want to translate between fields on disk and fields in yt.  This
-# construct shows how to do that.  Note that we use TranslationFunc.
+    def __init__(self, pf, field_list):
+        super(SkeletonFieldInfo, self).__init__(pf, field_list)
+        # If you want, you can check self.field_list
 
-translation_dict = {"x-velocity": "velx",
-                    "y-velocity": "vely",
-                    "z-velocity": "velz",
-                    "Density": "dens",
-                    "Temperature": "temp",
-                    "Pressure" : "pres", 
-                    "Grav_Potential" : "gpot",
-                    "particle_position_x" : "particle_posx",
-                    "particle_position_y" : "particle_posy",
-                    "particle_position_z" : "particle_posz",
-                    "particle_velocity_x" : "particle_velx",
-                    "particle_velocity_y" : "particle_vely",
-                    "particle_velocity_z" : "particle_velz",
-                    "particle_index" : "particle_tag",
-                    "Electron_Fraction" : "elec",
-                    "HI_Fraction" : "h   ",
-                    "HD_Fraction" : "hd  ",
-                    "HeI_Fraction": "hel ",
-                    "HeII_Fraction": "hep ",
-                    "HeIII_Fraction": "hepp",
-                    "HM_Fraction": "hmin",
-                    "HII_Fraction": "hp  ",
-                    "H2I_Fraction": "htwo",
-                    "H2II_Fraction": "htwp",
-                    "DI_Fraction": "deut",
-                    "DII_Fraction": "dplu",
-                    "ParticleMass": "particle_mass",
-                    "Flame_Fraction": "flam"}
+    def setup_fluid_fields(self):
+        # Here we do anything that might need info about the parameter file.
+        # You can use self.alias, self.add_output_field and self.add_field .
+        pass
 
-for f,v in translation_dict.items():
-    if v not in KnownSkeletonFields:
-        pfield = v.startswith("particle")
-        add_skeleton_field(v, function=NullFunc, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    if f.endswith("_Fraction") :
-        dname = "%s\/Fraction" % f.split("_")[0]
-    else :
-        dname = f                    
-    ff = KnownSkeletonFields[v]
-    pfield = f.startswith("particle")
-    add_field(f, TranslationFunc(v),
-              take_log=KnownSkeletonFields[v].take_log,
-              units = ff.units, display_name=dname,
-              particle_type = pfield)
-
-# Here's an example of adding a new field:
-
-add_skeleton_field("dens", function=NullFunc, take_log=True,
-                convert_function=_get_convert("dens"),
-                units=r"g / cm**3")
+    def setup_particle_fields(self, ptype):
+        # This will get called for every particle type.
+        pass

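Filled in, the two tuples might look like this for a hypothetical code whose
on-disk fields are "dens", "temp", and "particle_posx" (all names invented;
the tuple layout follows the commented form above):

    class MyCodeFieldInfo(FieldInfoContainer):
        known_other_fields = (
            # (on-disk name, (units, [aliases], display name))
            ("dens", ("g/cm**3", ["density"], None)),
            ("temp", ("K", ["temperature"], None)),
        )

        known_particle_fields = (
            ("particle_posx", ("code_length", ["particle_position_x"], None)),
        )
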
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/_skeleton/io.py
--- a/yt/frontends/_skeleton/io.py
+++ b/yt/frontends/_skeleton/io.py
@@ -23,12 +23,31 @@
     _particle_reader = False
     _dataset_type = "skeleton"
 
-    def _read_data(self, grid, field):
-        # This must return the array, of size/shape grid.ActiveDimensions, that
-        # corresponds to 'field'.
+    def _read_particle_coords(self, chunks, ptf):
+        # This needs to *yield* a series of tuples of (ptype, (x, y, z)).
+        # chunks is a list of chunks, and ptf is a dict where the keys are
+        # ptypes and the values are lists of fields.
         pass
 
-    def _read_data_slice(self, grid, field, axis, coord):
-        # If this is not implemented, the IO handler will just slice a
-        # _read_data item.
+    def _read_particle_fields(self, chunks, ptf, selector):
+        # This gets called after the arrays have been allocated.  It needs to
+        # yield ((ptype, field), data) where data is the masked results of
+        # reading ptype, field and applying the selector to the data read in.
+        # Selector objects have a .select_points(x,y,z) that returns a mask, so
+        # you need to do your masking here.
         pass
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        # This needs to allocate a set of arrays inside a dictionary, where the
+        # keys are the (ftype, fname) tuples and the values are arrays that
+        # have been masked using whatever selector method is appropriate.  The
+        # dict gets returned at the end and it should be flat, with selected
+        # data.  Note that if you're reading grid data, you might need to
+        # special-case a grid selector object.
+        pass
+
+    def _read_chunk_data(self, chunk, fields):
+        # This reads the data from a single chunk, and is only used for
+        # caching.
+        pass

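As a sketch of the masking contract described above, a particle reader might
be structured like this (the _read_raw helper is hypothetical; only
select_points comes from the comment in the diff):

    def _read_particle_fields(self, chunks, ptf, selector):
        # Assumes a hypothetical self._read_raw(ptype, name) returning a
        # full, unmasked 1D array from disk.
        for ptype, field_list in sorted(ptf.items()):
            x = self._read_raw(ptype, "particle_position_x")
            y = self._read_raw(ptype, "particle_position_y")
            z = self._read_raw(ptype, "particle_position_z")
            mask = selector.select_points(x, y, z)
            if mask is None:
                continue
            for field in field_list:
                data = self._read_raw(ptype, field)
                yield (ptype, field), data[mask]
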
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/frontends/boxlib/fields.py
--- a/yt/frontends/boxlib/fields.py
+++ b/yt/frontends/boxlib/fields.py
@@ -243,12 +243,17 @@
                 self.add_field(name = ("gas", "%s_density" % nice_name),
                                function = func,
                                units = "g/cm**3")
-                # We know this will either have one letter, or two.
-                if field[3] in string.letters:
-                    element, weight = field[2:4], field[4:-1]
-                else:
-                    element, weight = field[2:3], field[3:-1]
-                weight = int(weight)
+                # Most of the time our species will be of the form
+                # element name + atomic weight (e.g. C12), but
+                # sometimes we make up descriptive names (e.g. ash)
+                if any(char.isdigit() for char in field):
+                    # We know this will either have one letter, or two.
+                    if field[3] in string.letters:
+                        element, weight = field[2:4], field[4:-1]
+                    else:
+                        element, weight = field[2:3], field[3:-1]
+                    weight = int(weight)
+
                 # Here we can, later, add number density.
             if field.startswith("omegadot("):
                 nice_name = field[9:-1]

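Extracted from the hunk above, the rule is: species fields that contain a
digit carry an element symbol plus an integer weight, while purely
descriptive names carry neither.  A standalone version (Python 2, matching
the string.letters usage in the diff; the function name and the "X(...)"
field spelling are assumptions based on the slicing offsets):

    import string

    def parse_species(field):
        # field is assumed to look like "X(C12)", "X(He4)", or "X(ash)".
        if any(char.isdigit() for char in field):
            if field[3] in string.letters:
                # Two-letter element, e.g. "X(He4)" -> ("He", 4)
                element, weight = field[2:4], field[4:-1]
            else:
                # One-letter element, e.g. "X(C12)" -> ("C", 12)
                element, weight = field[2:3], field[3:-1]
            return element, int(weight)
        # Descriptive species, e.g. "X(ash)" -> ("ash", None)
        return field[2:-1], None
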
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/units/unit_lookup_table.py
--- a/yt/units/unit_lookup_table.py
+++ b/yt/units/unit_lookup_table.py
@@ -94,6 +94,7 @@
 # Add LaTeX representations for units with trivial representations.
 latex_symbol_lut = {
     "unitary" : "",
+    "dimensionless" : "",
     "code_length" : "\\rm{code}\/\\rm{length}",
     "code_time" : "\\rm{code}\/\\rm{time}",
     "code_mass" : "\\rm{code}\/\\rm{mass}",

diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -47,20 +47,18 @@
 
         # Validate
         if not isinstance(cgs_value, float):
-            raise UnitParseError("cgs_value must be a float, got a %s." \
+            raise UnitParseError("cgs_value must be a float, got a %s."
                                  % type(cgs_value))
-        
+
         validate_dimensions(dimensions)
 
         # Add to symbol lut
         if tex_repr is None:
-            latex_symbol_lut[symbol] = "\\rm{" + symbol + "}"
-        else:
-            latex_symbol_lut[symbol] = tex_repr
+            tex_repr = "\\rm{" + symbol + "}"
+        latex_symbol_lut.setdefault(symbol, tex_repr)
 
         # Add to lut
-        if tex_repr is None: tex_repr = symbol
-        self.lut.update( {symbol: (cgs_value, dimensions)} )
+        self.lut.update({symbol: (cgs_value, dimensions)})
 
     def remove(self, symbol):
         """

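The setdefault rewrite folds the two branches into one, and as a side effect
a symbol that is registered twice keeps the LaTeX representation it already
had.  In miniature:

    latex_symbol_lut = {}

    def add_symbol(symbol, tex_repr=None):
        # Mirror of the branch above: build a default TeX form, then
        # record it only if the symbol is not already known.
        if tex_repr is None:
            tex_repr = "\\rm{" + symbol + "}"
        latex_symbol_lut.setdefault(symbol, tex_repr)

    add_symbol("Msun", "M_\\odot")
    add_symbol("Msun")   # no-op: "M_\\odot" is kept
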
diff -r f1032b474045bcc6f5cbe14350d306d4cc09e1cd -r ef5172e8e55df9c5909cc27fe26aa6c4d8d530b4 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -251,10 +251,15 @@
         # Left blank to be overridden in subclasses
         pass
 
-    def _switch_pf(self, new_pf):
+    def _switch_pf(self, new_pf, data_source=None):
         ds = self.data_source
         name = ds._type_name
         kwargs = dict((n, getattr(ds, n)) for n in ds._con_args)
+        if data_source is not None:
+            if name != "proj":
+                raise RuntimeError("The data_source keyword argument "
+                                   "is only defined for projections.")
+            kwargs['data_source'] = data_source
         new_ds = getattr(new_pf, name)(**kwargs)
         self.pf = new_pf
         self.data_source = new_ds

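In use, the new keyword lets a projection be re-pointed at another dataset
while swapping in a matching data_source; any other plot type raises.  A
usage sketch (pf1, pf2, and sp2 are placeholders for two loaded datasets and
a data object drawn from the second):

    prj = ProjectionPlot(pf1, "x", "density")
    prj._switch_pf(pf2, data_source=sp2)    # allowed: _type_name is "proj"

    # slc = SlicePlot(pf1, "x", "density")
    # slc._switch_pf(pf2, data_source=sp2)  # would raise RuntimeError
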
Repository URL: https://bitbucket.org/yt_analysis/yt/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.


