[yt-svn] commit/yt-3.0: 22 new changesets

commits-noreply at bitbucket.org
Fri Sep 20 10:41:18 PDT 2013


22 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/d234eedb9c77/
Changeset:   d234eedb9c77
Branch:      yt-3.0
User:        scopatz
Date:        2013-07-31 04:26:07
Summary:     some not really working moab reader
Affected #:  11 files

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,11 +6,8 @@
 import subprocess
 import shutil
 import glob
-import setuptools
-from distutils.version import StrictVersion
-if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
-    import distribute_setup
-    distribute_setup.use_setuptools()
+#import distribute_setup
+#distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -156,6 +153,8 @@
 # End snippet
 ######
 
+import setuptools
+
 VERSION = "3.0dev"
 
 if os.path.exists('MANIFEST'):

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/api.py
--- /dev/null
+++ b/yt/frontends/moab/api.py
@@ -0,0 +1,42 @@
+"""
+API for yt.frontends.moab
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from .data_structures import \
+      MoabHex8Grid, \
+      MoabHex8Hierarchy, \
+      MoabHex8StaticOutput
+
+from .fields import \
+      MoabFieldInfo, \
+      KnownMoabFields, \
+      add_moab_field
+
+from .io import \
+      IOHandlerMoabH5MHex8
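
A minimal usage sketch of the API exported above (the path "cube.h5m" is a
placeholder invented for illustration; per the _is_valid check in the
data_structures diff below, any filename ending in .h5m is accepted):

    from yt.frontends.moab.api import MoabHex8StaticOutput

    # Placeholder filename; _is_valid() only tests for the .h5m suffix.
    pf = MoabHex8StaticOutput("cube.h5m")
    print(pf)  # prints the basename without its extension, per __repr__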

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/data_structures.py
--- /dev/null
+++ b/yt/frontends/moab/data_structures.py
@@ -0,0 +1,250 @@
+"""Data structures for MOAB Hex8.
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+
+"""
+
+import h5py
+import numpy as np
+import weakref
+from yt.funcs import *
+from yt.data_objects.grid_patch import \
+           AMRGridPatch
+from yt.geometry.grid_geometry_handler import \
+           GridGeometryHandler
+from yt.data_objects.static_output import \
+           StaticOutput
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.utilities.io_handler import \
+    io_registry
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+
+from .fields import MoabFieldInfo, KnownMoabFields
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, NullFunc
+
+def _get_convert(fname):
+    def _conv(data):
+        return data.convert(fname)
+    return _conv
+
+class MoabHex8Grid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, id, hierarchy, level, start, dimensions):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = []
+        self.Children = []
+        self.Level = level
+        self.start_index = start.copy()
+        self.stop_index = self.start_index + dimensions
+        self.ActiveDimensions = dimensions.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf.refine_by
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = np.array((RE-LE)/self.ActiveDimensions)
+        if self.pf.dimensionality < 2: self.dds[1] = 1.0
+        if self.pf.dimensionality < 3: self.dds[2] = 1.0
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    @property
+    def filename(self):
+        return None
+
+class MoabHex8Hierarchy(GridGeometryHandler):
+
+    grid = MoabHex8Grid
+
+    def __init__(self, pf, data_style='h5m'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        self.max_level = 10  # FIXME
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._fhandle = h5py.File(self.hierarchy_filename,'r')
+        GridGeometryHandler.__init__(self,pf,data_style)
+
+        self._fhandle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        self.field_list = self._fhandle['field_types'].keys()
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        self.num_grids = 1 #self._fhandle['/grid_parent_id'].shape[0]
+
+    def _parse_hierarchy(self):
+        f = self._fhandle
+        dxs = []
+        self.grids = np.empty(self.num_grids, dtype='object')
+        levels = np.array([0], dtype="int32")
+        glis = (f['grid_left_index'][:]).copy()
+        gdims = (f['grid_dimensions'][:]).copy()
+        active_dims = ~((np.max(gdims, axis=0) == 1) &
+                        (self.parameter_file.domain_dimensions == 1))
+
+        for i in range(levels.shape[0]):
+            self.grids[i] = self.grid(i, self, levels[i],
+                                      glis[i],
+                                      gdims[i])
+            self.grids[i]._level_id = levels[i]
+
+            dx = (self.parameter_file.domain_right_edge-
+                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
+            dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
+            dxs.append(dx)
+        dx = np.array(dxs)
+        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
+        self.grid_dimensions = gdims.astype("int32")
+        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
+        self.grid_particle_count = f['grid_particle_count'][:]
+        del levels, glis, gdims
+
+    def _populate_grid_objects(self):
+        mask = np.empty(self.grids.size, dtype='int32')
+        for gi, g in enumerate(self.grids):
+            g._prepare_grid()
+            g._setup_dx()
+
+        for gi, g in enumerate(self.grids):
+            g.Children = self._get_grid_children(g)
+            for g1 in g.Children:
+                g1.Parent.append(g)
+            get_box_grids_level(self.grid_left_edge[gi,:],
+                                self.grid_right_edge[gi,:],
+                                self.grid_levels[gi],
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            m = mask.astype("bool")
+            m[gi] = False
+            siblings = self.grids[gi:][m[gi:]]
+            if len(siblings) > 0:
+                g.OverlappingSiblings = siblings.tolist()
+        self.max_level = self.grid_levels.max()
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_box_grids(self, left_edge, right_edge):
+        '''
+        Gets back all the grids between a left edge and right edge
+        '''
+        eps = np.finfo(np.float64).eps
+        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1) \
+                        &  np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
+
+        return self.grids[grid_i], grid_i
+
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self._get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+
+class MoabHex8StaticOutput(StaticOutput):
+    _hierarchy_class = MoabHex8Hierarchy
+    _fieldinfo_fallback = MoabFieldInfo
+    _fieldinfo_known = KnownMoabFields
+
+    def __init__(self, filename, data_style='h5m',
+                 storage_filename = None):
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+        self.filename = filename
+
+    def _set_units(self):
+        """Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['cm'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+        # This should be improved.
+        self._handle = h5py.File(self.parameter_filename, "r")
+        """\
+        for field_name in self._handle["/field_types"]:
+            current_field = self._handle["/field_types/%s" % field_name]
+            if 'field_to_cgs' in current_field.attrs:
+                self.units[field_name] = current_field.attrs['field_to_cgs']
+            else:
+                self.units[field_name] = 1.0
+            if 'field_units' in current_field.attrs:
+                current_fields_unit = just_one(current_field.attrs['field_units'])
+            else:
+                current_fields_unit = ""
+            self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
+                   units=current_fields_unit, projected_units="",
+                   convert_function=_get_convert(field_name))
+        """
+        self._handle.close()
+        del self._handle
+
+    def _parse_parameter_file(self):
+        self._handle = f = h5py.File(self.parameter_filename, "r")
+        coords = self._handle["/tstt/nodes/coordinates"]
+        self.domain_left_edge = coords[0]
+        self.domain_right_edge = coords[-1]
+        self.domain_dimensions = self.domain_right_edge - self.domain_left_edge
+        self.refine_by = 2
+        self.dimensionality = len(self.domain_dimensions)
+        self.current_time = 0.0
+        self.unique_identifier = self.parameter_filename
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        #self.field_ordering = sp["field_ordering"]
+        #self.boundary_conditions = sp["boundary_conditions"][:]
+        #p = [bnd == 0 for bnd in self.boundary_conditions[::2]]
+        #self.periodicity = ensure_tuple(p)
+        self.current_redshift = self.omega_lambda = self.omega_matter \
+                              = self.hubble_constant \
+                              = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+        self._handle.close()
+        del self._handle
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        fname = args[0]
+        return fname.endswith('.h5m')
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/definitions.py
--- /dev/null
+++ b/yt/frontends/moab/definitions.py
@@ -0,0 +1,25 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/fields.py
--- /dev/null
+++ b/yt/frontends/moab/fields.py
@@ -0,0 +1,73 @@
+"""MOAB-specific fields
+
+Author: Samuel W. Skillman <samskillman at gmail.com>
+Affiliation: University of Colorado at Boulder
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+
+"""
+
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType, \
+    NullFunc, \
+    TranslationFunc
+import yt.data_objects.universal_fields
+
+log_translation_dict = {"Density": "density",
+                        "Pressure": "pressure"}
+
+translation_dict = {"x-velocity": "velocity_x",
+                    "y-velocity": "velocity_y",
+                    "z-velocity": "velocity_z"}
+                    
+# translation_dict = {"mag_field_x": "cell_centered_B_x ",
+#                     "mag_field_y": "cell_centered_B_y ",
+#                     "mag_field_z": "cell_centered_B_z "}
+
+MoabFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = MoabFieldInfo.add_field
+
+KnownMoabFields = FieldInfoContainer()
+add_moab_field = KnownMoabFields.add_field
+
+add_moab_field("density", function=NullFunc, take_log=True,
+          units=r"\rm{g}/\rm{cm}^3",
+          projected_units =r"\rm{g}/\rm{cm}^2")
+
+add_moab_field("specific_energy", function=NullFunc, take_log=True,
+          units=r"\rm{erg}/\rm{g}")
+
+add_moab_field("pressure", function=NullFunc, take_log=True,
+          units=r"\rm{erg}/\rm{g}")
+
+add_moab_field("velocity_x", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_moab_field("velocity_y", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_moab_field("velocity_z", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_moab_field("mag_field_x", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_moab_field("mag_field_y", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+add_moab_field("mag_field_z", function=NullFunc, take_log=False,
+          units=r"\rm{cm}/\rm{s}")
+
+for f,v in log_translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=True)
+
+for f,v in translation_dict.items():
+    add_field(f, TranslationFunc(v), take_log=False)
+
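
The translation dictionaries above alias yt's universal field names to the
on-disk names; a self-contained sketch of that registration pattern follows
(registry and register_field are hypothetical stand-ins for
FieldInfoContainer and add_field, for illustration only):

    # Hypothetical stand-ins for FieldInfoContainer/add_field.
    registry = {}

    def register_field(name, function, take_log):
        registry[name] = (function, take_log)

    def translation_func(on_disk_name):
        # Closure that resolves the alias by reading the on-disk field.
        return lambda field, data: data[on_disk_name]

    log_translation_dict = {"Density": "density", "Pressure": "pressure"}
    for yt_name, disk_name in log_translation_dict.items():
        register_field(yt_name, translation_func(disk_name), take_log=True)

    print(registry["Density"][0](None, {"density": 2.5}))  # -> 2.5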

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/io.py
--- /dev/null
+++ b/yt/frontends/moab/io.py
@@ -0,0 +1,45 @@
+"""The data-file handling functions
+
+Author: Anthony Scopatz <scopatz at gmail.com>
+Affiliation: The University of Wisconsin-Madison
+
+"""
+import numpy as np
+from yt.funcs import mylog
+from yt.utilities.io_handler import BaseIOHandler
+
+
+def field_dname(field_name):
+    return "/tstt/elements/Hex8/tags/{0}".format(field_name)
+
+
+# TODO all particle bits were removed
+class IOHandlerMoabH5MHex8(BaseIOHandler):
+    _data_style = "h5m"
+    _offset_string = 'data:offsets=0'
+    _data_string = 'data:datatype=0'
+
+    def __init__(self, pf, *args, **kwargs):
+        # TODO check if _num_per_stride is needed
+        self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+        self._handle = pf._handle
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        fhandle = self._handle
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)
+        ngrids = sum(len(chunk.objs) for chunk in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [fname for ftype, fname in fields], ngrids)
+        x, y, z = fhandle['/tstt/nodes/coordinates'][:].T
+        mask = selector.select_points(x, y, z)
+        for field in fields:
+            ftype, fname = field
+            data = fhandle[field_dname(fname)][mask]
+            rv[field][:] = data
+        return rv
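
_read_fluid_selection above masks the node coordinates with the selector and
then fancy-indexes each field dataset; a standalone numpy sketch of that
mask-then-gather step (coordinates and field values are random placeholders):

    import numpy as np

    coords = np.random.random((100, 3))  # stand-in for /tstt/nodes/coordinates
    x, y, z = coords.T
    # Stand-in for selector.select_points(x, y, z): a boolean node mask.
    mask = (x > 0.25) & (x < 0.75)

    field_on_disk = np.random.random(100)  # stand-in for one Hex8 tag dataset
    rv = np.empty(mask.sum(), dtype=field_on_disk.dtype)
    rv[:] = field_on_disk[mask]  # gather only the selected values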

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/moab/setup.py
--- /dev/null
+++ b/yt/frontends/moab/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('moab', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config
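
The configuration hook above follows the numpy.distutils convention used
throughout yt; a sketch of how such a hook is consumed standalone (the
__main__ guard is illustrative, not part of the changeset):

    from numpy.distutils.core import setup
    from numpy.distutils.misc_util import Configuration

    def configuration(parent_package='', top_path=None):
        # Mirrors yt/frontends/moab/setup.py: declare the subpackage and
        # emit its generated __config__.py.
        config = Configuration('moab', parent_package, top_path)
        config.make_config_py()
        return config

    if __name__ == '__main__':
        setup(configuration=configuration)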

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -15,6 +15,7 @@
     config.add_subpackage("flash")
     config.add_subpackage("gdf")
     config.add_subpackage("maestro")
+    config.add_subpackage("moab")
     config.add_subpackage("nyx")
     config.add_subpackage("orion")
     config.add_subpackage("artio")

diff -r c7f0b4c36217f18e0b4bb4fdd763b8bab4cade69 -r d234eedb9c777562fe4b2721b2a3af434472b34a yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -104,6 +104,9 @@
 from yt.frontends.gdf.api import \
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
+from yt.frontends.moab.api import \
+    MoabHex8StaticOutput, MoabFieldInfo, add_moab_field
+
 from yt.frontends.athena.api import \
     AthenaStaticOutput, AthenaFieldInfo, add_athena_field
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/24983f726a22/
Changeset:   24983f726a22
Branch:      yt-3.0
User:        scopatz
Date:        2013-08-19 21:01:30
Summary:     merged from upstream
Affected #:  79 files

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,16 @@
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
 sam.skillman at gmail.com = samskillman at gmail.com
 casey at thestarkeffect.com = caseywstark at gmail.com
 chiffre = chiffre at posteo.de
 Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
 function do_setup_py
 {
     [ -e $1/done ] && return
-    echo "Installing $1 (arguments: '$*')"
-    [ ! -e $1/extracted ] && tar xfz $1.tar.gz
-    touch $1/extracted
-    cd $1
-    if [ ! -z `echo $1 | grep h5py` ]
+    LIB=$1
+    shift
+    if [ -z "$@" ]
+    then
+        echo "Installing $LIB"
+    else
+        echo "Installing $LIB (arguments: '$@')"
+    fi
+    [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+    touch $LIB/extracted
+    cd $LIB
+    if [ ! -z `echo $LIB | grep h5py` ]
     then
         shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
 
 function get_ytproject
 {
+    [ -e $1 ] && return
     echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
@@ -551,67 +558,93 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e $BZLIB/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
+        cd $BZLIB
         if [ `uname` = "Darwin" ]
         then
             if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e $ZLIB/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd $ZLIB
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e $PNG/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e $PNG ] && tar xfz $PNG.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd $PNG
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e $FREETYPE_VER/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -688,11 +722,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e $HDF5/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd $HDF5
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e $SQLITE/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd $SQLITE
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+    cd $PYTHON
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    do_setup_py $MERCURIAL
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py $NUMPY ${NUMPY_ARGS}
 else
-    if [ ! -e scipy-0.11.0/done ]
+    if [ ! -e $SCIPY/done ]
     then
 	if [ ! -e BLAS/done ]
 	then
@@ -798,17 +832,17 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o 1>> ${LOG_FILE}
+	    ar r libfblas.a *.o &>> ${LOG_FILE}
 	    ranlib libfblas.a 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
 	fi
-	if [ ! -e lapack-3.4.2/done ]
+	if [ ! -e $LAPACK/done ]
 	then
-	    tar xfz lapack-3.4.2.tar.gz
+	    tar xfz $LAPACK.tar.gz
 	    echo "Building LAPACK"
-	    cd lapack-3.4.2/
+	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
 	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
 	    touch done
@@ -816,9 +850,9 @@
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
-    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/$LAPACK/liblapack.a
+    do_setup_py $NUMPY ${NUMPY_ARGS}
+    do_setup_py $SCIPY ${NUMPY_ARGS}
 fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e $ZEROMQ/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd $ZEROMQ
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py $PYZMQ --zmq=${DEST_DIR}
+    do_setup_py $TORNADO
 fi
 
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
 #!python
 import os, re
-from distutils import version
+from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import YTDataContainer
 namespace = locals().copy()
@@ -23,10 +23,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
     api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+    api_version = '0.11'
 else:
-    api_version = '0.11'
+    api_version = '1.0'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
@@ -42,13 +44,18 @@
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
 elif api_version == "0.10":
     ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
-    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+    if api_version == "0.11":
+        from IPython.frontend.terminal.interactiveshell import \
+            TerminalInteractiveShell
+    elif api_version == "1.0":
+        from IPython.terminal.interactiveshell import TerminalInteractiveShell
+    else:
+        raise RuntimeError
     ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
                     display_banner = True)
     if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
-    raise RuntimeError
+
 
 # The rest is a modified version of the IPython default profile code
 
@@ -77,7 +84,7 @@
     ip = ip_shell.IP.getapi()
     try_next = IPython.ipapi.TryNext
     kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
     ip = ip_shell
     try_next = IPython.core.error.TryNext
     kwargs = dict()
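
The version gate above relies on LooseVersion comparing dotted version
strings component-wise; a quick sketch of why IPython 1.0.0 now lands in the
new '1.0' branch rather than the old '0.11' one:

    from distutils.version import LooseVersion

    assert LooseVersion('0.13.1') <= LooseVersion('1.0')  # -> api_version '0.11'
    assert LooseVersion('1.0.0') > LooseVersion('1.0')    # -> api_version '1.0'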

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -96,7 +96,7 @@
     if answer_big_data:
         nose_argv.append('--answer-big-data')
     log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
-    ytcfg["yt","suppressStreamLogging"] = 'True'
+    ytcfg.set("yt","suppressStreamLogging", 'True')
     initial_dir = os.getcwd()
     yt_file = os.path.abspath(__file__)
     yt_dir = os.path.dirname(yt_file)
@@ -105,4 +105,4 @@
         nose.run(argv=nose_argv)
     finally:
         os.chdir(initial_dir)
-        ytcfg["yt","suppressStreamLogging"] = log_suppress
+        ytcfg.set("yt","suppressStreamLogging", str(log_suppress))

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,809 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+        import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts, 
+        minError=1E-5, complexLim=.999,
+        fitLim=.99, minLength=3, 
+        maxLength=1000, splitLim=.99,
+        output_file=None):
+
+    """
+    This function is designed to fit an absorption spectrum by breaking 
+    the spectrum up into absorption complexes, and iteratively adding
+    and optimizing voigt profiles to each complex.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        1d array of wavelengths
+    fluxData : (N) ndarray
+        array of flux corresponding to the wavelengths given
+        in x. (needs to be the same size as x)
+    orderFits : list
+        list of the names of the species in the order that they 
+        should be fit. Names should correspond to the names of the species
+        given in speciesDicts. (ex: ['lya','OVI'])
+    speciesDicts : dictionary
+        Dictionary of dictionaries (I'm addicted to dictionaries, I
+        confess). Top level keys should be the names of all the species given
+        in orderFits. The entries should be dictionaries containing all 
+        relevant parameters needed to create an absorption line of a given 
+        species (f,Gamma,lambda0) as well as max and min values for parameters
+        to be fit
+    complexLim : float, optional
+        Maximum flux to start the edge of an absorption complex. Different 
+        from fitLim because it decides extent of a complex rather than 
+        whether or not a complex is accepted. 
+    fitLim : float, optional
+        Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.98, a region where all the flux is between 1.0 and
+        .99 will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+    minLength : int, optional
+        number of cells required for a complex to be included. 
+        default is 3 cells.
+    maxLength : int, optional
+        number of cells required for a complex to be split up. Default
+        is 1000 cells.
+    splitLim : float, optional
+        if attempting to split a region for being larger than maxlength
+        the point of the split must have a flux greater than splitLim 
+        (ie: absorption greater than splitLim). Default= .99.
+    output_file : string, optional
+        location to save the results of the fit. 
+
+    Returns
+    -------
+    allSpeciesLines : dictionary
+        Dictionary of dictionaries representing the fit lines. 
+        Top level keys are the species given in orderFits and the corresponding
+        entries are dictionaries with the keys 'N','b','z', and 'group#'. 
+        Each of these corresponds to a list of the parameters for every
+        accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+        fits some part of the absorption spectrum). 'group#' is a similar list
+        but identifies which absorbing complex each line belongs to. Lines
+        with the same group# were fit at the same time. group#'s do not
+        correlate between species (ie: an lya line with group number 1 and
+        an OVI line with group number 1 were not fit together and do
+        not necessarily correspond to the same region)
+    yFit : (N) ndarray
+        array of flux corresponding to the combination of all fitted
+        absorption profiles. Same size as x.
+    """
+
+    #Empty dictionary for fitted lines
+    allSpeciesLines = {}
+
+    #Wavelength of beginning of array, wavelength resolution
+    x0,xRes=x[0],x[1]-x[0]
+
+    #Empty fit without any lines
+    yFit = na.ones(len(fluxData))
+
+    #Find all regions where lines/groups of lines are present
+    cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+            complexLim=complexLim, minLength=minLength,
+            maxLength=maxLength, splitLim=splitLim)
+
+    #Fit all species one at a time in given order from low to high wavelength
+    for species in orderFits:
+        speciesDict = speciesDicts[species]
+        speciesLines = {'N':na.array([]),
+                        'b':na.array([]),
+                        'z':na.array([]),
+                        'group#':na.array([])}
+
+        #Set up wavelengths for species
+        initWl = speciesDict['wavelength'][0]
+
+        for b_i,b in enumerate(cBounds):
+            xBounded=x[b[1]:b[2]]
+            yDatBounded=fluxData[b[1]:b[2]]
+            yFitBounded=yFit[b[1]:b[2]]
+
+            #Find init redshift
+            z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+            #Check if any flux at partner sites
+            if not _line_exists(speciesDict['wavelength'],
+                    fluxData,z,x0,xRes,fitLim): 
+                continue 
+
+            #Fit Using complex tools
+            newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+                    z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+            #Check existence of partner lines if applicable
+            newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData, 
+                    b, minError*(b[2]-b[1]),
+                    x0, xRes, speciesDict)
+
+            #If flagged as a bad fit, species is lyman alpha,
+            #   and it may be a saturated line, use special tools
+            if flag and species=='lya' and min(yDatBounded)<.1:
+                newLinesP=_large_flag_fit(xBounded,yDatBounded,
+                        yFitBounded,z,speciesDict,
+                        fitLim,minError*(b[2]-b[1]))
+
+            #Adjust total current fit
+            yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+            #Add new group to all fitted lines
+            if na.size(newLinesP)>0:
+                speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+                speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+                speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+                groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+                speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+
+        allSpeciesLines[species]=speciesLines
+
+    if output_file:
+        _output_fit(allSpeciesLines, output_file)
+
+    return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict, 
+        initP=None):
+    """ Fit an absorption complex by iteratively adding and optimizing
+    voigt profiles.
+    
+    A complex is defined as a region where some number of lines may be present,
+    or a region of non-zero absorption. Lines are iteratively added
+    and optimized until the flux generated from the optimized parameters
+    has a least-squares difference from the desired flux profile that is
+    less than the error bound.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelength
+    ydat : (N) ndarray
+        array of desired flux profile to be fitted for the wavelength
+        space given by x. Same size as x.
+    yFit : (N) ndarray
+        array of flux profile fitted for the wavelength
+        space given by x already. Same size as x.
+    initz : float
+        redshift to try putting first line at 
+        (maximum absorption for region)
+    minSize : float
+        minimum absorption allowed for a line to still count as a line
+        given in normalized flux (ie: for minSize=.9, only lines with minimum
+        flux less than .9 will be fitted)
+    errBound : float
+        maximum total error allowed for an acceptable fit
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+    initP : (M, 3) ndarray, optional
+        initial guess to try for line parameters to fit the region. Used
+        by large_flag_fit. Default = None, and initial guess generated
+        automatically.
+
+    Returns
+    -------
+    linesP : (M, 3) ndarray
+        Array of best parameters if a good enough fit is found in 
+        the form [[N1,b1,z1], [N2,b2,z2],...]
+    flag : bool
+        boolean value indicating the success of the fit (True if unsuccessful)
+    """
+
+    #Setup initial line guesses
+    if initP==None: #Regular fit
+        initP = [0,0,0] 
+        if min(yDat)<.5: #Large lines get larger initial guess 
+            initP[0] = 10**16
+        elif min(yDat)>.9: #Small lines get smaller initial guess
+            initP[0] = 10**12.5
+        else:
+            initP[0] = speciesDict['init_N']
+        initP[1] = speciesDict['init_b']
+        initP[2]=initz
+        initP=na.array([initP])
+
+    linesP = initP
+
+    #For generating new z guesses
+    wl0 = speciesDict['wavelength'][0]
+
+    #Check if first line exists still
+    if min(yDat-yFit+1)>minSize: 
+        return [],False
+    
+    #Values to proceed through first run
+    errSq,prevErrSq=1,1000
+
+    while True:
+        #Initial parameter guess from joining parameters from all lines
+        #   in lines into a single array
+        initP = linesP.flatten()
+
+        #Optimize line
+        fitP,success=optimize.leastsq(_voigt_error,initP,
+                args=(x,yDat,yFit,speciesDict),
+                epsfcn=1E-10,maxfev=1000)
+
+        #Set results of optimization
+        linesP = na.reshape(fitP,(-1,3))
+
+        #Generate difference between current best fit and data
+        yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+        dif = yFit*yNewFit-yDat
+
+        #Sum to get idea of goodness of fit
+        errSq=sum(dif**2)
+
+        #If good enough, break
+        if errSq < errBound: 
+            break
+
+        #If last fit was worse, reject the last line and revert to last fit
+        if errSq > prevErrSq*10:
+            #If its still pretty damn bad, cut losses and try flag fit tools
+            if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+                return [],True
+            else:
+                yNewFit=_gen_flux_lines(x,prevLinesP,speciesDict)
+                break
+
+        #If too many lines 
+        if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+            #If its fitable by flag tools and still bad, use flag tools
+            if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+                return [],True
+            else:
+                break 
+
+        #Store previous data in case reject next fit
+        prevErrSq = errSq
+        prevLinesP = linesP
+
+
+        #Set up initial condition for new line
+        newP = [0,0,0] 
+        if min(dif)<.1:
+            newP[0]=10**12
+        elif min(dif)>.9:
+            newP[0]=10**16
+        else:
+            newP[0]=10**14
+        newP[1] = speciesDict['init_b']
+        newP[2]=(x[dif.argmax()]-wl0)/wl0
+        linesP=na.append(linesP,[newP],axis=0)
+
+
+    #Check the parameters of all lines to see if they fall in an
+    #   acceptable range, as given in dict ref
+    remove=[]
+    for i,p in enumerate(linesP):
+        check=_check_params(na.array([p]),speciesDict)
+        if check: 
+            remove.append(i)
+    linesP = na.delete(linesP,remove,axis=0)
+
+    return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+    """
+    Attempts to more robustly fit saturated lyman alpha regions that have
+    not converged to satisfactory fits using the standard tools.
+
+    Uses a preselected sample of a wide range of initial parameter guesses
+    designed to fit saturated lines (see get_test_lines).
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelength
+    yDat : (N) ndarray
+        array of desired flux profile to be fitted for the wavelength
+        space given by x. Same size as x.
+    yFit : (N) ndarray
+        array of flux profile fitted for the wavelength
+        space given by x already. Same size as x.
+    initz : float
+        redshift to try putting first line at 
+        (maximum absorption for region)
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+    minSize : float
+        minimum absorption allowed for a line to still count as a line
+        given in normalized flux (ie: for minSize=.9, only lines with minimum
+        flux less than .9 will be fitted)
+    errBound : float
+        maximum total error allowed for an acceptable fit
+
+    Returns
+    -------
+    bestP : (N, 3) ndarray
+        array of best-fit parameters, in the form [[N1,b1,z1],
+        [N2,b2,z2], ...], if a good enough fit is found; an empty
+        list is returned otherwise
+    """
+
+    #Set up some initial line guesses
+    lineTests = _get_test_lines(initz)
+
+    #Keep track of the lowest achieved error
+    bestError = 1000 
+
+    #Iterate through test line guesses
+    for initLines in lineTests:
+        if initLines[1,0]==0:
+            initLines = na.delete(initLines,1,axis=0)
+
+        #Do fitting with initLines as first guess
+        linesP,flag=_complex_fit(x,yDat,yFit,initz,
+                minSize,errBound,speciesDict,initP=initLines)
+
+        #Find error of last fit
+        yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+        dif = yFit*yNewFit-yDat
+        errSq=sum(dif**2)
+
+        #If error lower, keep track of the lines used to make that fit
+        if errSq < bestError:
+            bestError = errSq
+            bestP = linesP
+
+    if bestError>10*errBound*len(x): 
+        return []
+    else:
+        return bestP
+
+def _get_test_lines(initz):
+    """
+    Returns a 3d numpy array of lines to test as initial guesses for
+    saturated, difficult-to-fit Lyman alpha absorbers.
+    
+    The array is 3d: the first dimension indexes the separate initial
+    guesses, the second dimension holds multiple lines for the same guess
+    (e.g. trying a broad line plus a saturated line), and the third
+    dimension contains the 3 fit parameters (N,b,z)
+
+    Parameters
+    ----------
+    initz : float
+        redshift to give all the test lines
+
+    Returns
+    -------
+    testP : (10, 2, 3) ndarray
+        numpy array of the form 
+        [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+    """
+
+    #Set up a bunch of empty lines
+    testP = na.zeros((10,2,3))
+
+    testP[0,0,:]=[1E18,20,initz]
+    testP[1,0,:]=[1E18,40,initz]
+    testP[2,0,:]=[1E16,5, initz]
+    testP[3,0,:]=[1E16,20,initz]
+    testP[4,0,:]=[1E16,80,initz]
+
+    testP[5,0,:]=[1E18,20,initz]
+    testP[6,0,:]=[1E18,40,initz]
+    testP[7,0,:]=[1E16,5, initz]
+    testP[8,0,:]=[1E16,20,initz]
+    testP[9,0,:]=[1E16,80,initz]
+
+    testP[5,1,:]=[1E13,100,initz]
+    testP[6,1,:]=[1E13,100,initz]
+    testP[7,1,:]=[1E13,100,initz]
+    testP[8,1,:]=[1E13,100,initz]
+    testP[9,1,:]=[1E13,100,initz]
+
+    return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+    """ 
+    Gets the indices of the wavelength range containing the redshifted
+    wavelength wl, sized to match the initial wavelength range given by b.
+
+    Used for checking if species with multiple lines (as in the OVI doublet)
+    fit all lines appropriately.
+
+    Parameters
+    ----------
+    z : float
+        redshift
+    b : (3) ndarray/list
+        initial bounds in form [i0,i1,i2] where i0 is the index of the 
+        minimum flux for the complex, i1 is index of the lower wavelength 
+        edge of the complex, and i2 is the index of the higher wavelength
+        edge of the complex.
+    wl : float
+        unredshifted wavelength of the peak of the new region 
+    x0 : float
+        wavelength of the index 0
+    xRes : float
+        difference in wavelength for two consecutive indices
+    
+    Returns
+    -------
+    indices : (2) tuple
+        Tuple (i1,i2) where i1 is the index of the lower wavelength bound of 
+        the new region and i2 is the index of the higher wavelength bound of
+        the new region
+    """
+
+    r=[-b[1]+100+b[0],b[2]+100-b[0]]
+    redWl = (z+1)*wl
+    iRedWl=int((redWl-x0)/xRes)
+    indices = (iRedWl-r[0],iRedWl+r[1])
+
+    return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound, 
+        x0, xRes, speciesDict):
+    """
+    Given a set of parameters [N,b,z] that form multiple lines for a given
+    species (as in the OVI doublet), remove any set of parameters where
+    not all transition wavelengths have a line that matches the fit.
+
+    (ex: if a fit is determined based on the first line of the OVI doublet,
+    but the given parameters give a bad fit of the wavelength space of
+    the second line, then that set of parameters is removed from the array
+    of line parameters.)
+
+    Parameters
+    ----------
+    linesP : (N, 3) ndarray
+        array giving sets of line parameters in 
+        form [[N1, b1, z1], ...]
+    x : (N) ndarray
+        wavelength array [nm]
+    y : (N) ndarray
+        normalized flux array of original data
+    b : (3) tuple/list/ndarray
+        indices that give the bounds of the original region so that another 
+        region of similar size can be used to determine the goodness
+        of fit of the other wavelengths
+    errBound : float
+        size of the error that is appropriate for a given region, 
+        adjusted to account for the size of the region.
+    x0 : float
+        wavelength of index 0 in x
+    xRes : float
+        difference in wavelength between two consecutive indices
+    speciesDict : dictionary
+        dictionary containing the transition wavelengths and other
+        line parameters for the species
+
+    Returns
+    -------
+    linesP : (N, 3) ndarray
+        array similar to linesP that only contains lines with
+        appropriate fits of all transition wavelengths.
+    """
+
+    #List of lines to remove
+    removeLines=[]
+
+    #Iterate through all sets of line parameters
+    for i,p in enumerate(linesP):
+
+        #iterate over all transition wavelengths
+        for wl in speciesDict['wavelength']:
+
+            #Get the bounds of a similar sized region around the
+            #   appropriate wavelength, and then get the appropriate
+            #   region of wavelength and flux
+            lb = _get_bounds(p[2],b,wl,x0,xRes)
+            xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+            #Generate a fit and find the difference to data
+            yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+            dif =yb-yFitb
+
+            #Only count as error where the fitted line is too strong
+            #   (the fit absorbs more than the data)
+            dif = [k for k in dif if k>0]
+            err = sum(dif)
+
+            #If the fit is too bad then add the line to list of removed lines
+            if err > errBound*1E2:
+                removeLines.append(i)
+                break
+
+    #Remove all bad line fits
+    linesP = na.delete(linesP,removeLines,axis=0)
+
+    return linesP 
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+    """For a group of lines finds if the there is some change in flux greater
+    than some minimum at the same redshift with different initial wavelengths
+
+    Parameters
+    ----------
+    wavelengths : (N) ndarray
+        array of initial wavelengths to check
+    y : (N) ndarray
+        flux array to check
+    z : float
+        redshift at which to check for the lines
+    x0 : float
+        wavelength of the first value in y
+    xRes : float
+        difference in wavelength between consecutive cells in flux array
+    fluxMin : float
+        maximum flux for a line to count as existing; the flux at the
+        line center must fall below this value. 
+
+    Returns
+    -------
+
+    flag : boolean 
+        True if all lines exist, False otherwise
+    """
+
+    #Iterate through initial wavelengths
+    for wl in wavelengths:
+        #Redshifted wavelength
+        redWl = (z+1)*wl
+
+        #Index of the redshifted wavelength
+        indexRedWl = (redWl-x0)/xRes
+
+        #If the flux at the line center is above fluxMin, the line is absent
+        if y[int(indexRedWl)]>fluxMin:
+            return False
+
+    return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+        minLength =3, maxLength=1000, splitLim=.99):
+    """Breaks up the wavelength space into groups
+    where there is some absorption. 
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        array of wavelengths
+    yDat : (N) ndarray
+        array of flux corresponding to the wavelengths given
+        in x. (needs to be the same size as x)
+    complexLim : float, optional
+        Maximum flux to start the edge of an absorption complex. Different 
+        from fitLim because it decides extent of a complex rather than 
+        whether or not a complex is accepted. 
+    fitLim : float, optional
+        Maximum flux at which the level of absorption will trigger 
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.99, a region where all the flux is between 1.0 and
+        .99 will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+    minLength : int, optional
+        number of cells required for a complex to be included. 
+        default is 3 cells.
+    maxLength : int, optional
+        number of cells required for a complex to be split up. Default
+        is 1000 cells.
+    splitLim : float, optional
+        if attempting to split a region for being larger than maxLength,
+        the point of the split must have a flux greater than splitLim 
+        (ie: little enough absorption at the split point). Default = .99.
+
+    Returns
+    -------
+    cBounds : list
+        list of bounds in the form [[i0,i1,i2],...] where i0 is the 
+        index of the minimum flux (deepest absorption) for a complex, i1
+        is the index of the beginning of the complex, and i2 is the index
+        of the end of the complex. Indices refer to the indices of x and
+        yDat.
+    """
+
+    #Initialize empty list of bounds
+    cBounds=[]
+
+    #Iterate through cells of flux
+    i=0
+    while (i<len(x)):
+
+        #Start tracking when the flux drops below the complex edge limit
+        if yDat[i]<complexLim:
+
+            #Advance until the flux rises back above the edge limit
+            j=0
+            while i+j < len(x) and yDat[i+j]<complexLim: j=j+1
+
+            #Check if the complex is big enough
+            if j >minLength:
+
+                #Check if there is enough absorption for the complex to
+                #   be included
+                cPeak = yDat[i:i+j].argmin()
+                if yDat[cPeak+i]<fitLim:
+                    cBounds.append([cPeak+i,i,i+j])
+
+            i=i+j
+        i=i+1
+
+    i=0
+    #Iterate through the bounds
+    while i < len(cBounds):
+        b=cBounds[i]
+
+        #Check if the region needs to be divided
+        if b[2]-b[1]>maxLength:
+
+            #Find the minimum absorption in the middle two quartiles of
+            #   the large complex
+            q=(b[2]-b[1])/4
+            cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+            #Only break it up if the minimum absorption is actually low enough
+            if yDat[cut]>splitLim:
+
+                #Get the new two peaks
+                b1Peak = yDat[b[1]:cut].argmin()+b[1]
+                b2Peak = yDat[cut:b[2]].argmin()+cut
+
+                #add the two regions separately
+                cBounds.insert(i+1,[b1Peak,b[1],cut])
+                cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+                #Remove the original region
+                cBounds.pop(i)
+                i=i+1
+        i=i+1
+
+    return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+    """
+    Calculates the normalized flux for a region of wavelength space
+    generated by a set of absorption lines.
+
+    Parameters
+    ----------
+    x : (N) ndarray
+        Array of wavelength
+    linesP: (3,) ndarray
+        Array giving sets of line parameters in 
+        form [[N1, b1, z1], ...]
+    speciesDict : dictionary
+        Dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+
+    Returns
+    -------
+    flux : (N) ndarray
+        Array of normalized flux generated by the line parameters
+        given in linesP over the wavelength space given in x. Same size as x.
+    """
+    y=0
+    for p in linesP:
+        for i in range(speciesDict['numLines']):
+            f=speciesDict['f'][i]
+            g=speciesDict['Gamma'][i]
+            wl=speciesDict['wavelength'][i]
+            y = y+ _gen_tau(x,p,f,g,wl)
+    flux = na.exp(-y)
+    return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+    """This calculates a flux distribution for given parameters using the yt
+    voigt profile generator"""
+    N,b,z= p
+    
+    #Calculating quantities
+    tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+    a=7.95774715459E-15*Gamma*lambda_unshifted/b
+    x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+    
+    H = voigt(a,x)
+    
+    tau = tau_o*H
+
+    return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+    """
+    Gives the error at each point, used to optimize the fit of a group
+    of absorption lines to a given flux profile.
+
+    If the parameters are not in the acceptable range as defined in
+    speciesDict, the first value of the error array will contain a
+    large value (999), to prevent the optimizer from running into
+    negative-number problems.
+
+    Parameters
+    ----------
+    pTotal : (3N,) ndarray 
+        flattened array of line parameters [N1, b1, z1, N2, b2, z2, ...];
+        reshaped internally to (-1, 3)
+    x : (N) ndarray
+        array of wavelengths [nm]
+    yDat : (N) ndarray
+        the desired normalized flux profile to be fit, over the
+        wavelength space given by x
+    yFit : (N) ndarray
+        previous fit over the wavelength space given by x.
+    speciesDict : dictionary
+        dictionary containing all relevant parameters needed
+        to create an absorption line of a given species (f,Gamma,lambda0)
+        as well as max and min values for parameters to be fit
+
+    Returns
+    -------
+    error : (N) ndarray
+        the difference between the fit generated by the parameters
+        given in pTotal multiplied by the previous fit and the desired
+        flux profile, with the first index modified appropriately for bad 
+        parameter choices
+    """
+
+    pTotal.shape = (-1,3)
+    yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+    error = yDat-yFit*yNewFit
+    error[0] = _check_params(pTotal,speciesDict)
+
+    return error
+
+def _check_params(p, speciesDict):
+    """
+    Check to see if any of the parameters in p fall outside the range 
+        given in speciesDict.
+
+    Parameters
+    ----------
+    p : (3,) ndarray
+        array with form [[N1, b1, z1], ...] 
+    speciesDict : dictionary
+        dictionary with properties giving the max and min
+        values appropriate for each parameter N,b, and z.
+
+    Returns
+    -------
+    check : int
+        0 if all values are fine
+        999 if any values fall outside acceptable range
+    """
+    check = 0
+    if any(p[:,0] > speciesDict['maxN']) or\
+          any(p[:,0] < speciesDict['minN']) or\
+          any(p[:,1] > speciesDict['maxb']) or\
+          any(p[:,1] < speciesDict['minb']) or\
+          any(p[:,2] > speciesDict['maxz']) or\
+          any(p[:,2] < speciesDict['minz']):
+              check = 999
+    return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+    """
+    This function is designed to output the parameters of the series
+    of lines used to fit an absorption spectrum. 
+
+    The dataset contains entries in the form species/N, species/b,
+    species/z, and species/complex. The ith entry in each of the datasets
+    is the fitted parameter for the ith line fitted to the spectrum for
+    the given species. The species names come from the fitted line
+    dictionary.
+
+    Parameters
+    ----------
+    lineDic : dictionary
+        Dictionary of dictionaries representing the fit lines. 
+        Top level keys are the species given in orderFits and the corresponding
+        entries are dictionaries with the keys 'N','b','z', and 'group#'. 
+        Each of these corresponds to a list of the parameters for every
+        accepted fitted line. 
+    file_name : string, optional
+        Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+    """
+    f = h5py.File(file_name, 'w')
+    for ion, params in lineDic.iteritems():
+        f.create_dataset("{0}/N".format(ion),data=params['N'])
+        f.create_dataset("{0}/b".format(ion),data=params['b'])
+        f.create_dataset("{0}/z".format(ion),data=params['z'])
+        f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+    f.close()
+    print 'Writing spectrum fit to {0}'.format(file_name)
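
A minimal usage sketch of the helpers above (not part of the changeset):
generate a synthetic flux profile for one line and locate its absorption
complex.  The speciesDict values are illustrative HI Lyman-alpha numbers,
not authoritative; the module path follows the api.py import below, and the
underscore-prefixed helpers are module-internal, imported here only for
illustration.

    import numpy as na
    from yt.analysis_modules.absorption_spectrum.absorption_spectrum_fit \
        import _gen_flux_lines, _find_complexes

    # Illustrative species dictionary; keys match what the helpers read.
    speciesDict = {
        'name': 'HI lya',
        'f': [.4164],             # oscillator strengths
        'Gamma': [6.265E8],       # damping constants
        'wavelength': [1215.67],  # rest wavelengths
        'numLines': 1,
    }

    x = na.linspace(1200., 1400., 10000)    # observed wavelength grid
    linesP = na.array([[1E14, 20., 0.05]])  # one line: [N, b, z]
    flux = _gen_flux_lines(x, linesP, speciesDict)

    # Each complex is [peak index, start index, end index] into x.
    for peak, lo, hi in _find_complexes(x, flux):
        print 'complex peak %f in [%f, %f]' % (x[peak], x[lo], x[hi])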
+

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
 
 from .absorption_spectrum import \
     AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+    generate_total_fit
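
A hedged sketch of driving the new entry point.  The full signature of
generate_total_fit is not shown in this changeset, so the positional call
below (wavelengths, flux, fit order, species dictionaries) is an assumption
based on the helper docstrings:

    from yt.analysis_modules.absorption_spectrum.api import generate_total_fit

    orderFits = ['lya']                  # species names, fit in this order
    speciesDicts = {'lya': speciesDict}  # speciesDict as in the sketch above
    # Assumed return value: the accepted line parameters and the total fit.
    fitted_lines, fitted_flux = generate_total_fit(x, flux,
                                                   orderFits, speciesDicts)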

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -30,6 +30,7 @@
 
 class StandardRadialAnalysis(object):
     def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
+        raise NotImplementedError  # see TODO
         self.pf = pf
         # We actually don't want to replicate the handling of setting the
         # center here, so we will pass it to the sphere creator.
@@ -53,6 +54,7 @@
         prof = BinnedProfile1D(self.obj, self.n_bins, "Radius",
                                self.inner_radius, self.outer_radius)
         by_weights = defaultdict(list)
+        # TODO: analysis_field_list is undefined
         for fspec in analysis_field_list:
             if isinstance(fspec, types.TupleType) and len(fspec) == 2:
                 field, weight = fspec

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
 import ConfigParser, os, os.path, types
 
 ytcfgDefaults = dict(
-    serialize = 'True',
+    serialize = 'False',
     onlydeserialize = 'False',
     timefunctions = 'False',
     logfile = 'False',
@@ -62,7 +62,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold008',
+    gold_standard_filename = 'gold010',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None',
     thread_field_detection = 'False'
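
These defaults are stored as strings; since ytcfg is a ConfigParser, values
are read back with the typed getters.  A small sketch of reading the two
options changed here:

    from yt.config import ytcfg

    serialize = ytcfg.getboolean("yt", "serialize")   # now False
    gold = ytcfg.get("yt", "gold_standard_filename")  # now 'gold010'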

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -80,7 +80,7 @@
 
     def eval(self, pf):
         slc = self.SlicePlot(pf, self.axis, self.field, center = self.center)
-        return pc.save()
+        return slc.save()
 
 class QuantityProxy(AnalysisTask):
     _params = None

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -36,6 +36,7 @@
 import fileinput
 from re import finditer
 
+from yt.config import ytcfg
 from yt.funcs import *
 from yt.utilities.logger import ytLogger
 from .data_containers import \
@@ -497,10 +498,13 @@
     def _fill_fields(self, fields):
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
                          for field in fields]
+        domain_dims = self.pf.domain_dimensions.astype("int64") \
+                    * self.pf.refine_by**self.level
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
             fill_region(input_fields, output_fields, self.level,
-                        self.global_startindex, chunk.icoords, chunk.ires)
+                        self.global_startindex, chunk.icoords, chunk.ires,
+                        domain_dims, self.pf.refine_by)
         for name, v in zip(fields, output_fields):
             self[name] = v
 
@@ -653,13 +657,14 @@
     def _fill_fields(self, fields):
         ls = self._initialize_level_state(fields)
         for level in range(self.level + 1):
-            tot = 0
+            domain_dims = self.pf.domain_dimensions.astype("int64") \
+                        * self.pf.refine_by**level
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
-                tot += fill_region(input_fields, ls.fields, ls.current_level,
+                fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
-                            chunk.ires)
+                            chunk.ires, domain_dims, self.pf.refine_by)
             self._update_level_state(ls)
         for name, v in zip(fields, ls.fields):
             if self.level > 0: v = v[1:-1,1:-1,1:-1]
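
A worked illustration of the new domain_dims computation (values are
hypothetical): for a 32^3 root grid with refine_by = 2, the effective
domain size at level 3 is 256^3, presumably so fill_region can map cell
indices onto the full domain at that level.

    import numpy as np

    domain_dimensions = np.array([32, 32, 32])
    refine_by, level = 2, 3
    domain_dims = domain_dimensions.astype("int64") * refine_by**level
    # domain_dims -> [256 256 256]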

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -411,10 +411,12 @@
     def blocks(self):
         for io_chunk in self.chunks([], "io"):
             for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
-                g = self._current_chunk.objs[0]
-                mask = g._get_selector_mask(self.selector)
-                if mask is None: continue
-                yield g, mask
+                # For grids this will be a grid object, and for octrees it will
+                # be an OctreeSubset.  Note that we delegate to the sub-object.
+                o = self._current_chunk.objs[0]
+                for b, m in o.select_blocks(self.selector):
+                    if m is None: continue
+                    yield b, m
 
 class GenerationInProgress(Exception):
     def __init__(self, fields):
@@ -433,7 +435,9 @@
     @property
     def selector(self):
         if self._selector is not None: return self._selector
-        sclass = getattr(yt.geometry.selection_routines,
+        s_module = getattr(self, '_selector_module',
+                           yt.geometry.selection_routines)
+        sclass = getattr(s_module,
                          "%s_selector" % self._type_name, None)
         if sclass is None:
             raise YTDataSelectorNotImplemented(self._type_name)
@@ -456,7 +460,9 @@
         for field in itertools.cycle(fields_to_get):
             if inspected >= len(fields_to_get): break
             inspected += 1
-            if field not in self.pf.field_dependencies: continue
+            fd = self.pf.field_dependencies.get(field, None) or \
+                 self.pf.field_dependencies.get(field[1], None)
+            if fd is None: continue
             fd = self.pf.field_dependencies[field]
             requested = self._determine_fields(list(set(fd.requested)))
             deps = [d for d in requested if d not in fields_to_get]
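
With blocks now delegating to select_blocks (first hunk above), grid
frontends and octree frontends expose the same iteration pattern.  A hedged
sketch, assuming pf is an already-loaded dataset:

    dd = pf.h.all_data()
    for block, mask in dd.blocks:
        # block is an AMRGridPatch or an octree block slice; mask selects
        # the zones of that block that fall inside the container.
        print block["Density"][mask].sum()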

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -276,6 +276,13 @@
             else:
                 field = item
             finfo = self.pf._get_field_info(*field)
+            # For those cases where we are guessing the field type, we will
+            # need to re-update -- otherwise, our item will never have the
+            # field type.  This can lead to, for instance, "unknown" particle
+            # types not getting correctly identified.
+            # Note that the *only* way this works is if we also fix our field
+            # dependencies during checking.  Bug #627 talks about this.
+            item = self.pf._last_freq
         else:
             FI = getattr(self.pf, "field_info", FieldInfo)
             if item in FI:
@@ -444,7 +451,7 @@
         dd['units'] = self._units
         dd['projected_units'] = self._projected_units,
         dd['take_log'] = self.take_log
-        dd['validators'] = self.validators.copy()
+        dd['validators'] = list(self.validators)
         dd['particle_type'] = self.particle_type
         dd['vector_field'] = self.vector_field
         dd['display_field'] = True

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -492,12 +492,16 @@
         if vals is None: return
         return vals.reshape(self.ActiveDimensions, order="C")
 
+    def select_blocks(self, selector):
+        mask = self._get_selector_mask(selector)
+        yield self, mask
+
     def _get_selector_mask(self, selector):
-        if id(selector) == self._last_selector_id:
+        if hash(selector) == self._last_selector_id:
             mask = self._last_mask
         else:
             self._last_mask = mask = selector.fill_mask(self)
-            self._last_selector_id = id(selector)
+            self._last_selector_id = hash(selector)
             if mask is None:
                 self._last_count = 0
             else:
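
The switch from id(selector) to hash(selector) means the cached mask
survives across equivalent selector objects, not just the identical one.
The pattern in miniature (standalone sketch, not yt code; compute_mask is
hypothetical):

    class MaskCache(object):
        _last_key = None
        _last_mask = None

        def get_mask(self, selector):
            if hash(selector) != self._last_key:
                self._last_key = hash(selector)
                self._last_mask = self.compute_mask(selector)  # hypothetical
            return self._last_mask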

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -91,16 +91,32 @@
             return tr
         return tr
 
+    @property
+    def nz(self):
+        return self._num_zones + 2*self._num_ghost_zones
+
     def _reshape_vals(self, arr):
         if len(arr.shape) == 4: return arr
-        nz = self._num_zones + 2*self._num_ghost_zones
+        nz = self.nz
         n_oct = arr.shape[0] / (nz**3.0)
-        arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        if arr.size == nz*nz*nz*n_oct:
+            arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+        elif arr.size == nz*nz*nz*n_oct * 3:
+            arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+        else:
+            raise RuntimeError
         arr = np.asfortranarray(arr)
         return arr
 
     _domain_ind = None
 
+    def select_blocks(self, selector):
+        mask = self.oct_handler.mask(selector)
+        mask = self._reshape_vals(mask)
+        slicer = OctreeSubsetBlockSlice(self)
+        for i, sl in slicer:
+            yield sl, mask[:,:,:,i]
+
     @property
     def domain_ind(self):
         if self._domain_ind is None:
@@ -113,12 +129,17 @@
         cls = getattr(particle_deposit, "deposit_%s" % method, None)
         if cls is None:
             raise YTParticleDepositionNotImplemented(method)
-        nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+        nz = self.nz
+        nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
         op = cls(nvals) # We allocate number of zones, not number of octs
         op.initialize()
-        mylog.debug("Depositing %s particles into %s Octs",
-            positions.shape[0], nvals[-1])
-        op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+        mylog.debug("Depositing %s (%s^3) particles into %s Octs",
+            positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
+        pos = np.array(positions, dtype="float64")
+        # We should not need the following if we know in advance all our fields
+        # need no casting.
+        fields = [np.asarray(f, dtype="float64") for f in fields]
+        op.process_octree(self.oct_handler, self.domain_ind, pos, fields,
             self.domain_id, self._domain_offset)
         vals = op.finalize()
         if vals is None: return
@@ -127,7 +148,7 @@
     def select_icoords(self, dobj):
         d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -135,7 +156,7 @@
     def select_fcoords(self, dobj):
         d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
                                      num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -143,7 +164,7 @@
     def select_fwidth(self, dobj):
         d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
                                             domain_id = self.domain_id)
         return tr
@@ -151,7 +172,7 @@
     def select_ires(self, dobj):
         d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
                                   num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / 8
+        self._num_octs = d.shape[0] / (self.nz**3)
         tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
                                             domain_id = self.domain_id)
         return tr
@@ -162,7 +183,7 @@
         return n
 
     def count(self, selector):
-        if id(selector) == self._last_selector_id:
+        if hash(selector) == self._last_selector_id:
             if self._last_mask is None: return 0
             return self._last_mask.sum()
         self.select(selector)
@@ -182,7 +203,7 @@
     # This is some subset of an octree.  Note that the sum of subsets of an
     # octree may multiply include data files.  While we can attempt to mitigate
     # this, it's unavoidable for many types of data storage on disk.
-    _type_name = 'particle_octree_subset'
+    _type_name = 'indexed_octree_subset'
     _con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
     domain_id = -1
     def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
@@ -203,3 +224,49 @@
         self.base_region = base_region
         self.base_selector = base_region.selector
 
+class OctreeSubsetBlockSlice(object):
+    def __init__(self, octree_subset):
+        self.ind = None
+        self.octree_subset = octree_subset
+        # Cache some attributes
+        nz = octree_subset.nz
+        self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+        for attr in ["ires", "icoords", "fcoords", "fwidth"]:
+            v = getattr(octree_subset, attr)
+            setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
+
+    def __iter__(self):
+        for i in range(self._ires.shape[-1]):
+            self.ind = i
+            yield i, self
+
+    def clear_data(self):
+        pass
+
+    def __getitem__(self, key):
+        return self.octree_subset[key][:,:,:,self.ind]
+
+    def get_vertex_centered_data(self, *args, **kwargs):
+        raise NotImplementedError
+
+    @property
+    def id(self):
+        return np.random.randint(1)
+
+    @property
+    def Level(self):
+        return self._ires[0,0,0,self.ind]
+
+    @property
+    def LeftEdge(self):
+        LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+        return LE
+
+    @property
+    def RightEdge(self):
+        RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+        return RE
+
+    @property
+    def dds(self):
+        return self._fwidth[0,0,0,self.ind,:]
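
A sketch of how select_blocks above uses this slice object: each oct in the
subset is presented as one grid-like block (subset here is assumed to be an
existing OctreeSubset instance).

    slicer = OctreeSubsetBlockSlice(subset)
    for i, sl in slicer:
        # sl mimics a grid patch: per-oct edges and cell widths
        print sl.LeftEdge, sl.RightEdge, sl.dds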

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -41,6 +41,30 @@
     mass_sun_cgs, \
     mh
 
+def _field_concat(fname):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.pf.particle_types:
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
+            v.append(data[ptype, fname].copy())
+        rv = np.concatenate(v, axis=0)
+        return rv
+    return _AllFields
+
+def _field_concat_slice(fname, axi):
+    def _AllFields(field, data):
+        v = []
+        for ptype in data.pf.particle_types:
+            if ptype == "all" or \
+                ptype in data.pf.known_filters:
+                  continue
+            v.append(data[ptype, fname][:,axi])
+        rv = np.concatenate(v, axis=0)
+        return rv
+    return _AllFields
+
 def particle_deposition_functions(ptype, coord_name, mass_name, registry):
     orig = set(registry.keys())
     def particle_count(field, data):
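
A hedged sketch of how these closures would be registered, concatenating
per-type particle fields into an "all" field.  The add_field call mirrors
the registry interface used elsewhere in yt; the field names are
illustrative:

    registry.add_field(("all", "particle_mass"),
                       function=_field_concat("particle_mass"),
                       particle_type=True)
    # Vector fields get one slicing closure per component:
    for axi, ax in enumerate("xyz"):
        registry.add_field(("all", "particle_position_%s" % ax),
                           function=_field_concat_slice("particle_position",
                                                        axi),
                           particle_type=True)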

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -183,6 +183,8 @@
 
         # Get our bins
         if log_space:
+            if lower_bound <= 0.0 or upper_bound <= 0.0:
+                raise YTIllDefinedBounds(lower_bound, upper_bound)
             func = np.logspace
             lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
         else:
@@ -522,7 +524,10 @@
         return [self.x_bin_field, self.y_bin_field]
 
 def fix_bounds(upper, lower, logit):
-    if logit: return np.log10(upper), np.log10(lower)
+    if logit:
+        if lower <= 0.0 or upper <= 0.0:
+            raise YTIllDefinedBounds(lower, upper)
+        return np.log10(upper), np.log10(lower)
     return upper, lower
 
 class BinnedProfile2DInlineCut(BinnedProfile2D):
@@ -545,6 +550,8 @@
         self.total_stuff = source_data.sum()
         binned_field = self._get_empty_field()
         weight_field = self._get_empty_field()
+        m_field = self._get_empty_field()
+        q_field = self._get_empty_field()
         used_field = self._get_empty_field()
         mi = args[0]
         bin_indices_x = args[1][self.indices].ravel().astype('int64')
@@ -553,8 +560,8 @@
         weight_data = weight_data[mi][self.indices]
         nx = bin_indices_x.size
         #mylog.debug("Binning %s / %s times", source_data.size, nx)
-        Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
-                     weight_field, binned_field, used_field)
+        bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+                      weight_field, binned_field, m_field, q_field, used_field)
         if accumulation: # Fix for laziness
             if not iterable(accumulation):
                 raise SyntaxError("Accumulation needs to have length 2")
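
Why the new guard matters, in miniature: log-spaced bins with a nonpositive
bound silently produce infinities rather than failing loudly.

    import numpy as np
    print np.log10(0.0)   # -inf (plus a RuntimeWarning), not an exception
    # With this change, the profile constructors and fix_bounds raise
    # YTIllDefinedBounds instead of propagating -inf into the bins.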

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -194,7 +194,7 @@
             ts = np.abs(ts)
         self._dts[grid.id] = dts
         self._ts[grid.id] = ts
-        self._masks[grid.id] = masks
+        self._masks[grid.id] = mask
         return mask
 
 
@@ -248,33 +248,6 @@
         self._set_center(center)
         self.coord = coord
 
-    def reslice(self, coord):
-        """
-        Change the entire dataset, clearing out the current data and slicing at
-        a new location.  Not terribly useful except for in-place plot changes.
-        """
-        mylog.debug("Setting coordinate to %0.5e" % coord)
-        self.coord = coord
-        self.field_data.clear()
-
-    def shift(self, val):
-        """
-        Moves the slice coordinate up by either a floating point value, or an
-        integer number of indices of the finest grid.
-        """
-        if isinstance(val, types.FloatType):
-            # We add the dx
-            self.coord += val
-        elif isinstance(val, types.IntType):
-            # Here we assume that the grid is the max level
-            level = self.hierarchy.max_level
-            self.coord
-            dx = self.hierarchy.select_grids(level)[0].dds[self.axis]
-            self.coord += dx * val
-        else:
-            raise ValueError(val)
-        self.field_data.clear()
-
     def _generate_container_field(self, field):
         if self._current_chunk is None:
             self.hierarchy._identify_base_chunk(self)
@@ -375,7 +348,6 @@
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
         self._x_vec = self.orienter.unit_vectors[0]
         self._y_vec = self.orienter.unit_vectors[1]
-        self._d = -1.0 * np.dot(self._norm_vec, self.center)
         # First we try all three, see which has the best result:
         vecs = np.identity(3)
         self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
@@ -672,38 +644,6 @@
             raise SyntaxError("Making a fixed resolution slice with "
                               "particles isn't supported yet.")
 
-    def reslice(self, normal, center, width):
-
-        # Cleanup
-        del self._coord
-        del self._pixelmask
-
-        self.center = center
-        self.width = width
-        self.dds = self.width / self.dims
-        self.set_field_parameter('center', center)
-        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
-        self._d = -1.0 * np.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = np.identity(3)
-        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
-        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
-        self.set_field_parameter('cp_x_vec',self._x_vec)
-        self.set_field_parameter('cp_y_vec',self._y_vec)
-        self.set_field_parameter('cp_z_vec',self._norm_vec)
-        # Calculate coordinates of each pixel
-        _co = self.dds * \
-              (np.mgrid[-self.dims/2 : self.dims/2,
-                        -self.dims/2 : self.dims/2] + 0.5)
-
-        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
-                      np.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
     def get_data(self, fields):
         """
         Iterates over the list of fields and generates/reads them all.
@@ -888,7 +828,6 @@
     """
     _type_name = "region"
     _con_args = ('center', 'left_edge', 'right_edge')
-    _dx_pad = 0.5
     def __init__(self, center, left_edge, right_edge, fields = None,
                  pf = None, **kwargs):
         YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -8,7 +8,7 @@
 def configuration(parent_package='', top_path=None):
     from numpy.distutils.misc_util import Configuration
     config = Configuration('data_objects', parent_package, top_path)
+    config.add_subpackage("tests")
     config.make_config_py()  # installs __config__.py
-    config.add_subpackage("tests")
     #config.make_svn_version_py()
     return config

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -86,8 +86,11 @@
         if not os.path.exists(apath): raise IOError(filename)
         if apath not in _cached_pfs:
             obj = object.__new__(cls)
-            _cached_pfs[apath] = obj
-        return _cached_pfs[apath]
+            if obj._skip_cache is False:
+                _cached_pfs[apath] = obj
+        else:
+            obj = _cached_pfs[apath]
+        return obj
 
     def __init__(self, filename, data_style=None, file_style=None):
         """
@@ -157,6 +160,10 @@
     def _mrep(self):
         return MinimalStaticOutput(self)
 
+    @property
+    def _skip_cache(self):
+        return False
+
     def hub_upload(self):
         self._mrep.upload()
 
@@ -261,6 +268,10 @@
             raise YTGeometryNotSupported(self.geometry)
 
     def add_particle_filter(self, filter):
+        # This is a dummy, which we set up to enable passthrough of "all"
+        # concatenation fields.
+        n = getattr(filter, "name", filter)
+        self.known_filters[n] = None
         if isinstance(filter, types.StringTypes):
             used = False
             for f in filter_registry[filter]:
@@ -271,6 +282,7 @@
         else:
             used = self.h._setup_filtered_type(filter)
         if not used:
+            self.known_filters.pop(n, None)
             return False
         self.known_filters[filter.name] = filter
         return True
@@ -290,20 +302,25 @@
             self._last_finfo = self.field_info[(ftype, fname)]
             return self._last_finfo
         if fname == self._last_freq[1]:
-            mylog.debug("Guessing field %s is (%s, %s)", fname,
-                        self._last_freq[0], self._last_freq[1])
             return self._last_finfo
         if fname in self.field_info:
+            # Sometimes, if guessing_type == True, this will be switched for
+            # the type of field it is.  So we look at the field type and
+            # determine if we need to change the type.
+            fi = self._last_finfo = self.field_info[fname]
+            if fi.particle_type and self._last_freq[0] \
+                not in self.particle_types:
+                    field = "all", field[1]
+            elif not fi.particle_type and self._last_freq[0] \
+                not in self.fluid_types:
+                    field = self.default_fluid_type, field[1]
             self._last_freq = field
-            self._last_finfo = self.field_info[fname]
             return self._last_finfo
         # We also should check "all" for particles, which can show up if you're
         # mixing deposition/gas fields with particle fields.
         if guessing_type and ("all", fname) in self.field_info:
             self._last_freq = ("all", fname)
             self._last_finfo = self.field_info["all", fname]
-            mylog.debug("Guessing field %s is (%s, %s)", fname,
-                        "all", fname)
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
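
The caching contract after the __new__ change, as a standalone sketch (not
yt code): instances are memoized per path unless a subclass opts out via
_skip_cache.

    _cached_pfs = {}

    class Output(object):
        _skip_cache = False

        def __new__(cls, path):
            # yt keys the cache on os.path.abspath(path)
            if path not in _cached_pfs:
                obj = object.__new__(cls)
                if obj._skip_cache is False:
                    _cached_pfs[path] = obj
            else:
                obj = _cached_pfs[path]
            return obj

    assert Output('a.h5') is Output('a.h5')   # cached instance reused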
 

diff -r d234eedb9c777562fe4b2721b2a3af434472b34a -r 24983f726a224b0f5766e0511f5eb9f22f73f015 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -5,13 +5,23 @@
     ytcfg["yt","loglevel"] = "50"
     ytcfg["yt","__withintesting"] = "True"
 
+def _difference(x1, x2, dw):
+    rel = x1 - x2
+    rel[rel >  dw/2.0] -= dw
+    rel[rel < -dw/2.0] += dw
+    return rel
+
 def test_ellipsoid():
     # We decompose in different ways
-    cs = [np.array([0.5, 0.5, 0.5]),
+    cs = [
+          np.array([0.5, 0.5, 0.5]),
           np.array([0.1, 0.2, 0.3]),
-          np.array([0.8, 0.8, 0.8])]
+          np.array([0.8, 0.8, 0.8])
+          ]
+    np.random.seed(int(0x4d3d3d3))
     for nprocs in [1, 2, 4, 8]:
         pf = fake_random_pf(64, nprocs = nprocs)
+        DW = pf.domain_right_edge - pf.domain_left_edge
         min_dx = 2.0/pf.domain_dimensions
         ABC = np.random.random((3, 12)) * 0.1
         e0s = np.random.random((3, 12))
@@ -26,10 +36,17 @@
                 e0 = e0s[:,i]
                 tilt = tilts[i]
                 ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
-                yield assert_equal, np.all(ell["Radius"] <= A), True
+                yield assert_array_less, ell["Radius"], A
                 p = np.array([ell[ax] for ax in 'xyz'])
-                v  = np.zeros_like(ell["Radius"])
-                v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
-                v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
-                v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
-                yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
+                dot_evec = [np.zeros_like(ell["Radius"]) for i in range(3)]
+                vecs = [ell._e0, ell._e1, ell._e2]
+                mags = [ell._A, ell._B, ell._C]
+                my_c = np.array([c]*p.shape[1]).transpose()
+                for ax_i in range(3):
+                    dist = _difference(p[ax_i,:], my_c[ax_i,:], DW[ax_i])
+                    for ax_j in range(3):
+                        dot_evec[ax_j] += dist * vecs[ax_j][ax_i]
+                dist = 0
+                for ax_i in range(3):
+                    dist += dot_evec[ax_i]**2.0 / mags[ax_i]**2.0
+                yield assert_array_less, dist, 1.0
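
What the new _difference helper does, concretely: displacements are wrapped
into [-dw/2, dw/2] so points are measured against the nearest periodic image
of the ellipsoid center.

    import numpy as np
    dw = 1.0
    rel = np.array([0.95, 0.05]) - np.array([0.05, 0.95])  # [ 0.9, -0.9]
    rel[rel >  dw/2.0] -= dw
    rel[rel < -dw/2.0] += dw
    print rel                                              # [-0.1  0.1]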

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/d443194553b7/
Changeset:   d443194553b7
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-16 22:22:11
Summary:     Merging from yt-3.0
Affected #:  490 files

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -17,3 +17,4 @@
 tabel = tabel at slac.stanford.edu
 sername=kayleanelson = kaylea.nelson at yale.edu
 kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,5 @@
 0000000000000000000000000000000000000000 hop callback
 a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
 954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
+079e456c38a87676472a458210077e2be325dc85 last_gplv3

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 COPYING.txt
--- /dev/null
+++ b/COPYING.txt
@@ -0,0 +1,81 @@
+===============================
+ The yt project licensing terms
+===============================
+
+yt is licensed under the terms of the Modified BSD License (also known as New
+or Revised BSD), as follows:
+
+Copyright (c) 2013-, yt Development Team
+Copyright (c) 2006-2013, Matthew Turk <matthewturk at gmail.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the yt Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the yt Development Team
+-----------------------------
+
+Matthew Turk began yt in 2006 and remains the project lead.  Over time yt has
+grown to include contributions from a large number of individuals from many
+diverse institutions, scientific, and technical backgrounds.
+
+Until the fall of 2013, yt was licensed under the GPLv3.  However, with consent
+from all developers and on a public mailing list, yt has been relicensed under
+the BSD 3-clause under a shared copyright model.  For more information, see:
+http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2013-July/003239.html
+All versions of yt prior to this licensing change are available under the
+GPLv3; all subsequent versions are available under the BSD 3-clause license.
+
+The yt Development Team is the set of all contributors to the yt project.  This
+includes all of the yt subprojects.
+
+The core team that coordinates development on BitBucket can be found here:
+http://bitbucket.org/yt_analysis/ 
+
+
+Our Copyright Policy
+--------------------
+
+yt uses a shared copyright model. Each contributor maintains copyright
+over their contributions to yt. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the yt
+source code, in its entirety is not the copyright of any single person or
+institution.  Instead, it is the collective copyright of the entire yt
+Development Team.  If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the yt repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,51 +1,55 @@
-YT is a group effort.
+yt is a group effort.
 
-Contributors:                   Tom Abel (tabel at stanford.edu)
-				David Collins (dcollins at physics.ucsd.edu)
-				Brian Crosby (crosby.bd at gmail.com)
-				Andrew Cunningham (ajcunn at gmail.com)
-				Nathan Goldbaum (goldbaum at ucolick.org)
-				Markus Haider (markus.haider at uibk.ac.at)
-				Cameron Hummels (chummels at gmail.com)
-				Christian Karch (chiffre at posteo.de)
-				Ji-hoon Kim (me at jihoonkim.org)
-				Steffen Klemer (sklemer at phys.uni-goettingen.de)
-				Kacper Kowalik (xarthisius.kk at gmail.com)
-				Michael Kuhlen (mqk at astro.berkeley.edu)
-				Eve Lee (elee at cita.utoronto.ca)
-				Yuan Li (yuan at astro.columbia.edu)
-				Chris Malone (chris.m.malone at gmail.com)
-				Josh Maloney (joshua.moloney at colorado.edu)
-				Chris Moody (cemoody at ucsc.edu)
-				Andrew Myers (atmyers at astro.berkeley.edu)
-				Jeff Oishi (jsoishi at gmail.com)
-				Jean-Claude Passy (jcpassy at uvic.ca)
-				Mark Richardson (Mark.L.Richardson at asu.edu)
-				Thomas Robitaille (thomas.robitaille at gmail.com)
-				Anna Rosen (rosen at ucolick.org)
-				Anthony Scopatz (scopatz at gmail.com)
-				Devin Silvia (devin.silvia at colorado.edu)
-				Sam Skillman (samskillman at gmail.com)
-				Stephen Skory (s at skory.us)
-				Britton Smith (brittonsmith at gmail.com)
-				Geoffrey So (gsiisg at gmail.com)
-				Casey Stark (caseywstark at gmail.com)
-				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-				Stephanie Tonnesen (stonnes at gmail.com)
-				Matthew Turk (matthewturk at gmail.com)
-				Rich Wagner (rwagner at physics.ucsd.edu)
-				John Wise (jwise at physics.gatech.edu)
-				John ZuHone (jzuhone at gmail.com)
+Contributors:   
+                Tom Abel (tabel at stanford.edu)
+                David Collins (dcollins at physics.ucsd.edu)
+                Brian Crosby (crosby.bd at gmail.com)
+                Andrew Cunningham (ajcunn at gmail.com)
+                Hilary Egan (hilaryye at gmail.com)
+                John Forbes (jforbes at ucolick.org)
+                Nathan Goldbaum (goldbaum at ucolick.org)
+                Markus Haider (markus.haider at uibk.ac.at)
+                Cameron Hummels (chummels at gmail.com)
+                Christian Karch (chiffre at posteo.de)
+                Ji-hoon Kim (me at jihoonkim.org)
+                Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Kacper Kowalik (xarthisius.kk at gmail.com)
+                Michael Kuhlen (mqk at astro.berkeley.edu)
+                Eve Lee (elee at cita.utoronto.ca)
+                Sam Leitner (sam.leitner at gmail.com)
+                Yuan Li (yuan at astro.columbia.edu)
+                Chris Malone (chris.m.malone at gmail.com)
+                Josh Maloney (joshua.moloney at colorado.edu)
+                Chris Moody (cemoody at ucsc.edu)
+                Andrew Myers (atmyers at astro.berkeley.edu)
+                Jill Naiman (jnaiman at ucolick.org)
+                Kaylea Nelson (kaylea.nelson at yale.edu)
+                Jeff Oishi (jsoishi at gmail.com)
+                Jean-Claude Passy (jcpassy at uvic.ca)
+                Mark Richardson (Mark.L.Richardson at asu.edu)
+                Thomas Robitaille (thomas.robitaille at gmail.com)
+                Anna Rosen (rosen at ucolick.org)
+                Douglas Rudd (drudd at uchicago.edu)
+                Anthony Scopatz (scopatz at gmail.com)
+                Noel Scudder (noel.scudder at stonybrook.edu)
+                Devin Silvia (devin.silvia at colorado.edu)
+                Sam Skillman (samskillman at gmail.com)
+                Stephen Skory (s at skory.us)
+                Britton Smith (brittonsmith at gmail.com)
+                Geoffrey So (gsiisg at gmail.com)
+                Casey Stark (caseywstark at gmail.com)
+                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Stephanie Tonnesen (stonnes at gmail.com)
+                Matthew Turk (matthewturk at gmail.com)
+                Rich Wagner (rwagner at physics.ucsd.edu)
+                Andrew Wetzel (andrew.wetzel at yale.edu)
+                John Wise (jwise at physics.gatech.edu)
+                John ZuHone (jzuhone at gmail.com)
 
-We also include the Delaunay Triangulation module written by Robert Kern of
-Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
+Several items included in the yt/extern directory were written by other
+individuals and may bear their own license, including the progressbar module by
 Nilton Volpato.  The PasteBin interface code (as well as the PasteBin itself)
-was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
-developed by Oliver Hahn.  yt also includes a slightly-modified version of
-libconfig (http://www.hyperrealm.com/libconfig/) and an unmodified version of
-several routines from HEALpix (http://healpix.jpl.nasa.gov/).
-
-Large parts of development of yt were guided by discussions with Tom Abel, Ralf
-Kaehler, Mike Norman and Greg Bryan.
+was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library
+was developed by Oliver Hahn.  
 
 Thanks to everyone for all your contributions!

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 FUNDING
--- a/FUNDING
+++ /dev/null
@@ -1,40 +0,0 @@
-The development of yt has benefited from funding from many different sources
-and institutions.  Here is an incomplete list of these sources:
-
-  * NSF grant OCI-0904484
-  * NSF grant OCI-1048505
-  * NSF grant AST-0239709 
-  * NSF grant AST-0707474
-  * NSF grant AST-0708960
-  * NSF grant AST-0808184
-  * NSF grant AST-0807215 
-  * NSF grant AST-0807312
-  * NSF grant AST-0807075
-  * NSF grant AST-0908199
-  * NSF grant AST-0908553 
-  * NASA grant ATFP NNX08-AH26G
-  * NASA grant ATFP NNX09-AD80G
-  * NASA grant ATFP NNZ07-AG77G
-  * DOE Computational Science Graduate Fellowship under grant number DE-FG02-97ER25308
-
-Additionally, development of yt has benefited from the hospitality and hosting
-of the following institutions:
-
-  * Columbia University
-  * Harvard-Smithsonian Center for Astrophysics
-  * Institute for Advanced Study
-  * Kavli Institute for Cosmological Physics
-  * Kavli Institute for Particle Astrophysics and Cosmology
-  * Kavli Institute for Theoretical Physics
-  * Los Alamos National Lab
-  * Michigan State University
-  * Princeton University
-  * Stanford University
-  * University of California High-Performance Astro-Computing Center
-  * University of California at Berkeley
-  * University of California at San Diego
-  * University of California at Santa Cruz
-  * University of Chicago Research Computing Center
-  * University of Colorado at Boulder
-  * University of Maryland at College Park
-  * Yale University 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 LICENSE.txt
--- a/LICENSE.txt
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
-  The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works.  By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users.  We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors.  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
-  To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights.  Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received.  You must make sure that they, too, receive
-or can get the source code.  And you must show them these terms so they
-know their rights.
-
-  Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
-  For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software.  For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
-  Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so.  This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software.  The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable.  Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products.  If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
-  Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary.  To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.
-
-                       TERMS AND CONDITIONS
-
-  0. Definitions.
-
-  "This License" refers to version 3 of the GNU General Public License.
-
-  "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
-  "The Program" refers to any copyrightable work licensed under this
-License.  Each licensee is addressed as "you".  "Licensees" and
-"recipients" may be individuals or organizations.
-
-  To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy.  The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
-  A "covered work" means either the unmodified Program or a work based
-on the Program.
-
-  To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy.  Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
-  To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies.  Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
-  An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License.  If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
-  1. Source Code.
-
-  The "source code" for a work means the preferred form of the work
-for making modifications to it.  "Object code" means any non-source
-form of a work.
-
-  A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
-  The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form.  A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
-  The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities.  However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work.  For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
-  The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
-  The Corresponding Source for a work in source code form is that
-same work.
-
-  2. Basic Permissions.
-
-  All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met.  This License explicitly affirms your unlimited
-permission to run the unmodified Program.  The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work.  This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
-  You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force.  You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright.  Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
-  Conveying under any other circumstances is permitted solely under
-the conditions stated below.  Sublicensing is not allowed; section 10
-makes it unnecessary.
-
-  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
-  No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
-  When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
-  4. Conveying Verbatim Copies.
-
-  You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
-  You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
-  5. Conveying Modified Source Versions.
-
-  You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
-    a) The work must carry prominent notices stating that you modified
-    it, and giving a relevant date.
-
-    b) The work must carry prominent notices stating that it is
-    released under this License and any conditions added under section
-    7.  This requirement modifies the requirement in section 4 to
-    "keep intact all notices".
-
-    c) You must license the entire work, as a whole, under this
-    License to anyone who comes into possession of a copy.  This
-    License will therefore apply, along with any applicable section 7
-    additional terms, to the whole of the work, and all its parts,
-    regardless of how they are packaged.  This License gives no
-    permission to license the work in any other way, but it does not
-    invalidate such permission if you have separately received it.
-
-    d) If the work has interactive user interfaces, each must display
-    Appropriate Legal Notices; however, if the Program has interactive
-    interfaces that do not display Appropriate Legal Notices, your
-    work need not make them do so.
-
-  A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit.  Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
-  6. Conveying Non-Source Forms.
-
-  You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
-    a) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by the
-    Corresponding Source fixed on a durable physical medium
-    customarily used for software interchange.
-
-    b) Convey the object code in, or embodied in, a physical product
-    (including a physical distribution medium), accompanied by a
-    written offer, valid for at least three years and valid for as
-    long as you offer spare parts or customer support for that product
-    model, to give anyone who possesses the object code either (1) a
-    copy of the Corresponding Source for all the software in the
-    product that is covered by this License, on a durable physical
-    medium customarily used for software interchange, for a price no
-    more than your reasonable cost of physically performing this
-    conveying of source, or (2) access to copy the
-    Corresponding Source from a network server at no charge.
-
-    c) Convey individual copies of the object code with a copy of the
-    written offer to provide the Corresponding Source.  This
-    alternative is allowed only occasionally and noncommercially, and
-    only if you received the object code with such an offer, in accord
-    with subsection 6b.
-
-    d) Convey the object code by offering access from a designated
-    place (gratis or for a charge), and offer equivalent access to the
-    Corresponding Source in the same way through the same place at no
-    further charge.  You need not require recipients to copy the
-    Corresponding Source along with the object code.  If the place to
-    copy the object code is a network server, the Corresponding Source
-    may be on a different server (operated by you or a third party)
-    that supports equivalent copying facilities, provided you maintain
-    clear directions next to the object code saying where to find the
-    Corresponding Source.  Regardless of what server hosts the
-    Corresponding Source, you remain obligated to ensure that it is
-    available for as long as needed to satisfy these requirements.
-
-    e) Convey the object code using peer-to-peer transmission, provided
-    you inform other peers where the object code and Corresponding
-    Source of the work are being offered to the general public at no
-    charge under subsection 6d.
-
-  A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
-  A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling.  In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage.  For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product.  A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
-  "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source.  The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
-  If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information.  But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
-  The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed.  Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
-  Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year><name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year><name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ b/doc/how_to_develop_yt.txt
@@ -25,7 +25,7 @@
 Licenses
 --------
 
-All code in yt should be under the GPL-3 (preferred) or a compatible license.
+All code in yt should be under the BSD 3-clause license.
 
 How To Get The Source Code
 --------------------------

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -419,7 +419,7 @@
 echo "be installing ZeroMQ"
 
 printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
-get_willwont ${INST_0MQ}
+get_willwont ${INST_ROCKSTAR}
 echo "be installing Rockstar"
 
 echo
@@ -832,8 +832,8 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o &>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
@@ -844,7 +844,7 @@
 	    echo "Building LAPACK"
 	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
@@ -877,6 +877,11 @@
 mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
 echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
 echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
 do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
@@ -943,10 +948,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

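[Editorial note: the install-script hunks above all apply the same pattern,
wrapping each build step as ( cmd 2>&1 ) 1>> ${LOG_FILE} so stderr is folded
into stdout before being appended to the log, keeping compiler noise out of
the terminal.  A minimal Python sketch of the same idea; the command and log
path here are illustrative, not taken from the script:

    import subprocess

    def run_logged(cmd, log_path="yt_install.log"):
        # Append both streams to the log: stderr=subprocess.STDOUT mirrors
        # the shell's 2>&1, and opening the file with "a" mirrors 1>> LOG.
        with open(log_path, "a") as log:
            return subprocess.call(cmd, stdout=log, stderr=subprocess.STDOUT)

    if __name__ == "__main__":
        run_logged(["gfortran", "--version"])
]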
diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 setup.py
--- a/setup.py
+++ b/setup.py
@@ -247,7 +247,7 @@
         classifiers=["Development Status :: 5 - Production/Stable",
                      "Environment :: Console",
                      "Intended Audience :: Science/Research",
-                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "License :: OSI Approved :: BSD License",
                      "Operating System :: MacOS :: MacOS X",
                      "Operating System :: POSIX :: AIX",
                      "Operating System :: POSIX :: Linux",
@@ -268,7 +268,7 @@
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",
-        license="GPL-3",
+        license="BSD",
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,

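[Editorial note: the setup.py hunks above swap both the Trove classifier and
the license field from GPL to BSD.  Roughly, the affected metadata now reads
as in this sketch; the name and version are placeholders, not the project's
actual values:

    from setuptools import setup

    setup(
        name="example",
        version="0.0.1",
        classifiers=[
            "Development Status :: 5 - Production/Stable",
            "License :: OSI Approved :: BSD License",
        ],
        license="BSD",
    )
]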
diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -60,27 +60,17 @@
 All broadly useful code that doesn't clearly fit in one of the other
 categories goes here.
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 __version__ = "3.0-dev"
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/absorption_spectrum/__init__.py
--- a/yt/analysis_modules/absorption_spectrum/__init__.py
+++ b/yt/analysis_modules/absorption_spectrum/__init__.py
@@ -1,24 +1,14 @@
 """
 Import stuff for light cone generator.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -1,27 +1,17 @@
 """
 Absorption line generating functions.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 from yt.utilities.physical_constants import \

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -1,27 +1,17 @@
 """
 AbsorptionSpectrum class and member functions.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import h5py
 import numpy as np

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -1,33 +1,18 @@
 """
 API for absorption_spectrum
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .absorption_spectrum import \
     AbsorptionSpectrum
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -1,33 +1,18 @@
 """
 API for yt.analysis_modules
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .absorption_spectrum.api import \
     AbsorptionSpectrum
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/coordinate_transformation/api.py
--- a/yt/analysis_modules/coordinate_transformation/api.py
+++ b/yt/analysis_modules/coordinate_transformation/api.py
@@ -1,32 +1,17 @@
 """
 API for coordinate_transformation
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .transforms import \
     spherical_regrid

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/coordinate_transformation/transforms.py
--- a/yt/analysis_modules/coordinate_transformation/transforms.py
+++ b/yt/analysis_modules/coordinate_transformation/transforms.py
@@ -1,29 +1,17 @@
 """
 Transformations between coordinate systems
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: JS Oishi <jsoishi at astro.berkeley.edu>
-Organization: UC Berkeley
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2007-2011 Matthew Turk, J. S. Oishi.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 from yt.funcs import *
@@ -65,7 +53,7 @@
     new_grid['handled'] = np.zeros(new_grid['x'].shape, dtype='bool')
     for field in fields:
         new_grid[field] = np.zeros(new_grid['x'].shape, dtype='float64')
-    grid_order = np.argsort(data_source.gridLevels)
+    grid_order = np.argsort(data_source.grid_levels[:,0])
     ng = len(data_source._grids)
 
     for i,grid in enumerate(data_source._grids[grid_order][::-1]):

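[Editorial note: beyond the license-header change, the transforms.py hunk
replaces the old gridLevels attribute with grid_levels, which in yt 3.0 is an
(N, 1) array, hence the [:,0] column selection before sorting.  A small
sketch with made-up data of what that ordering does:

    import numpy as np

    # Made-up levels for four grids; grid_levels in yt 3.0 is (N, 1).
    grid_levels = np.array([[2], [0], [1], [0]])

    # argsort on the flattened column gives one index per grid,
    # ordered coarse to fine: here [1, 3, 2, 0].
    grid_order = np.argsort(grid_levels[:, 0])

    # The diff then iterates _grids[grid_order][::-1], i.e. finest first.
    for idx in grid_order[::-1]:
        print("grid %d at level %d" % (idx, grid_levels[idx, 0]))
]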
diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/api.py
--- a/yt/analysis_modules/cosmological_observation/api.py
+++ b/yt/analysis_modules/cosmological_observation/api.py
@@ -1,29 +1,18 @@
 """
 API for cosmology analysis.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .cosmology_splice import \
     CosmologySplice
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -1,27 +1,17 @@
 """
 CosmologyTimeSeries class and member functions.
 
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: Michigan State University
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/__init__.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/__init__.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/__init__.py
@@ -1,24 +1,14 @@
 """
 Import stuff for light cone generator.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/api.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/api.py
@@ -1,33 +1,18 @@
 """
 API for lightcone
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .light_cone import \
     LightCone
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/common_n_volume.py
@@ -2,27 +2,17 @@
 Function to calculate volume in common between two n-cubes, with optional
 periodic boundary conditions.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/halo_mask.py
@@ -1,27 +1,17 @@
 """
 Light cone halo mask functions.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import copy
 import h5py

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone.py
@@ -1,27 +1,17 @@
 """
 LightCone class and member functions.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import copy
 import h5py

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/light_cone_projection.py
@@ -1,27 +1,17 @@
 """
 Create randomly centered, tiled projections to be used in light cones.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import numpy as np
 import copy

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
--- a/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
+++ b/yt/analysis_modules/cosmological_observation/light_cone/unique_solution.py
@@ -1,27 +1,17 @@
 """
 Functions to generate unique light cone solutions.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import copy
 import numpy as np

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_ray/api.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/api.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/api.py
@@ -1,32 +1,17 @@
 """
 API for light_ray
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .light_ray import \
     LightRay

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -1,27 +1,17 @@
 """
 LightRay class and member functions.
 
-Author: Britton Smith <brittons at origins.colorado.edu>
-Affiliation: CASA/University of CO, Boulder
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2012 Britton Smith.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import copy
 import h5py

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/halo_finding/api.py
--- a/yt/analysis_modules/halo_finding/api.py
+++ b/yt/analysis_modules/halo_finding/api.py
@@ -1,33 +1,18 @@
 """
 API for halo_finding
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from halo_objects import \
     Halo, \
     HOPHalo, \

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/halo_finding/fof/EnzoFOF.c
--- a/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
+++ b/yt/analysis_modules/halo_finding/fof/EnzoFOF.c
@@ -1,22 +1,10 @@
-/************************************************************************
-* Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
-*
-* This file is part of yt.
-*
-* yt is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; either version 3 of the License, or
-* (at your option) any later version.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-* GNU General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program.  If not, see <http://www.gnu.org/licenses/>.
-*
-************************************************************************/
+/*******************************************************************************
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+*******************************************************************************/
 
 //
 // EnzoFOF

diff -r 24983f726a224b0f5766e0511f5eb9f22f73f015 -r d443194553b773171ef4430f4e6dab86bbad5ee6 yt/analysis_modules/halo_finding/halo_objects.py
--- a/yt/analysis_modules/halo_finding/halo_objects.py
+++ b/yt/analysis_modules/halo_finding/halo_objects.py
@@ -1,31 +1,17 @@
 """
 HOP-output data handling
 
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Stephen Skory <s at skory.us>
-Affiliation: UCSD Physics/CASS
-Author: Geoffrey So <gsiisg at gmail.com> (Ellipsoidal functions)
-Affiliation: UCSD Physics/CASS
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
 
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
+"""
 
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
-"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
 
 import gc
 import h5py
@@ -1062,8 +1048,9 @@
     def __init__(self, data_source, dm_only=True, redshift=-1):
         """
         Run hop on *data_source* with a given density *threshold*.  If
-        *dm_only* is True (default), only run it on the dark matter particles, otherwise
-        on all particles.  Returns an iterable collection of *HopGroup* items.
+        *dm_only* is True (default), only run it on the dark matter particles, 
+        otherwise on all particles.  Returns an iterable collection of 
+        *HopGroup* items.
         """
         self._data_source = data_source
         self.dm_only = dm_only
@@ -2215,11 +2202,11 @@
                 self.comm.mpi_bcast(self.bucket_bounds)
             my_bounds = self.bucket_bounds[self.comm.rank]
             LE, RE = my_bounds[0], my_bounds[1]
-            self._data_source = self.hierarchy.region_strict([0.] * 3, LE, RE)
+            self._data_source = self.hierarchy.region([0.] * 3, LE, RE)
         # If this isn't parallel, define the region as an AMRRegionStrict so
         # particle IO works.
         if self.comm.size == 1:
-            self._data_source = self.hierarchy.periodic_region_strict([0.5] * 3,
+            self._data_source = self.hierarchy.region([0.5] * 3,
                 LE, RE)
         # get the average spacing between particles for this region
         # The except is for the serial case where the full box is what we want.
@@ -2305,8 +2292,7 @@
                 np.zeros(3, dtype='float64'))
         # If we're using a subvolume, we now re-divide.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
-                ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
             # Cut up the volume.
             padded, LE, RE, self._data_source = \
                 self.partition_hierarchy_3d(ds=self._data_source,
@@ -2503,7 +2489,7 @@
         # object representing the entire domain and sum it "lazily" with
         # Derived Quantities.
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE, ds_RE)
+            self._data_source = pf.h.region([0.] * 3, ds_LE, ds_RE)
         else:
             self._data_source = pf.h.all_data()
         self.padding = padding  # * pf["unitary"] # This should be clevererer
@@ -2599,7 +2585,7 @@
             linking_length = np.abs(link)
         self.padding = padding
         if subvolume is not None:
-            self._data_source = pf.h.periodic_region_strict([0.] * 3, ds_LE,
+            self._data_source = pf.h.region([0.] * 3, ds_LE,
                 ds_RE)
         else:
             self._data_source = pf.h.all_data()

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/ee7cb8433611/
Changeset:   ee7cb8433611
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-16 22:23:29
Summary:     Relicensing MOAB
Affected #:  5 files

diff -r d443194553b773171ef4430f4e6dab86bbad5ee6 -r ee7cb8433611eff976bd35e023525c9a51635696 yt/frontends/moab/api.py
--- a/yt/frontends/moab/api.py
+++ b/yt/frontends/moab/api.py
@@ -1,33 +1,18 @@
 """
-API for yt.frontends.gdf
+API for yt.frontends.moab
 
-Author: Samuel W. Skillman <samskillman at gmail.com>
-Affiliation: University of Colorado at Boulder
-Author: Matthew Turk <matthewturk at gmail.com>
-Affiliation: UCSD
-Author: J.S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Author: Britton Smith <brittonsmith at gmail.com>
-Affiliation: MSU
-License:
-  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from .data_structures import \
       MoabHex8Grid, \
       MoabHex8Hierarchy, \

diff -r d443194553b773171ef4430f4e6dab86bbad5ee6 -r ee7cb8433611eff976bd35e023525c9a51635696 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -1,14 +1,18 @@
-"""Data structures for MOAB Hex8.
+"""
+Data structures for MOAB Hex8.
 
-Author: Samuel W. Skillman <samskillman at gmail.com>
-Affiliation: University of Colorado at Boulder
-Author: Matthew Turk <matthewturk at gmail.com>
-Author: J. S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
+
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 import h5py
 import numpy as np
 import weakref

diff -r d443194553b773171ef4430f4e6dab86bbad5ee6 -r ee7cb8433611eff976bd35e023525c9a51635696 yt/frontends/moab/definitions.py
--- a/yt/frontends/moab/definitions.py
+++ b/yt/frontends/moab/definitions.py
@@ -1,25 +1,16 @@
 """
 Various definitions for various other modules and routines
 
-Author: J. S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
-License:
-  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
 
-  This file is part of yt.
-
-  yt is free software; you can redistribute it and/or modify
-  it under the terms of the GNU General Public License as published by
-  the Free Software Foundation; either version 3 of the License, or
-  (at your option) any later version.
-
-  This program is distributed in the hope that it will be useful,
-  but WITHOUT ANY WARRANTY; without even the implied warranty of
-  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-  GNU General Public License for more details.
-
-  You should have received a copy of the GNU General Public License
-  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 """
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+

diff -r d443194553b773171ef4430f4e6dab86bbad5ee6 -r ee7cb8433611eff976bd35e023525c9a51635696 yt/frontends/moab/fields.py
--- a/yt/frontends/moab/fields.py
+++ b/yt/frontends/moab/fields.py
@@ -1,13 +1,16 @@
 """MOAB-specific fields
 
-Author: Samuel W. Skillman <samskillman at gmail.com>
-Affiliation: University of Colorado at Boulder
-Author: J. S. Oishi <jsoishi at gmail.com>
-Affiliation: KIPAC/SLAC/Stanford
-Homepage: http://yt-project.org/
 
 """
 
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 from yt.data_objects.field_info_container import \
     FieldInfoContainer, \
     FieldInfo, \

diff -r d443194553b773171ef4430f4e6dab86bbad5ee6 -r ee7cb8433611eff976bd35e023525c9a51635696 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -1,9 +1,16 @@
-"""The data-file handling functions
+"""MOAB-specific fields
 
-Author: Anthony Scopatz <scopatz at gmail.com>
-Affiliation: The University of Wisconsin-Madison
 
 """
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
 import numpy as np
 from yt.funcs import mylog
 from yt.utilities.io_handler import BaseIOHandler


https://bitbucket.org/yt_analysis/yt-3.0/commits/343586add2ea/
Changeset:   343586add2ea
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-16 23:37:46
Summary:     First iteration of unstructured (semi-structured, really) reader/handler.
Affected #:  6 files

diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/data_objects/unstructured_mesh.py
--- /dev/null
+++ b/yt/data_objects/unstructured_mesh.py
@@ -0,0 +1,187 @@
+"""
+Unstructured mesh base container.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import exceptions
+import pdb
+import weakref
+import itertools
+import numpy as np
+
+from yt.funcs import *
+from yt.utilities.definitions import x_dict, y_dict
+
+from yt.data_objects.data_containers import \
+    YTFieldData, \
+    YTDataContainer, \
+    YTSelectionContainer
+from yt.utilities.definitions import x_dict, y_dict
+from .field_info_container import \
+    NeedsGridType, \
+    NeedsOriginalGrid, \
+    NeedsDataField, \
+    NeedsProperty, \
+    NeedsParameter
+from yt.geometry.selection_routines import convert_mask_to_indices
+import yt.geometry.particle_deposit as particle_deposit
+
+class SemiStructuredMesh(YTSelectionContainer):
+    _spatial = False
+    _connectivity_length = 8
+    _type_name = 'semi_structured_mesh'
+    _skip_add = True
+    _index_offset = 0
+    _con_args = ('mesh_id', 'filename', 'connectivity_indices',
+                 'connectivity_coords')
+    _container_fields = ("dx", "dy", "dz")
+
+    def __init__(self, mesh_id, filename, connectivity_indices,
+                 connectivity_coords, hierarchy):
+        if self._connectivity_length != 8:
+            raise NotImplementedError
+        self.field_data = YTFieldData()
+        self.filename = filename
+        self.field_parameters = {}
+        self.mesh_id = mesh_id
+        # This is where we set up the connectivity information
+        self.connectivity_indices = connectivity_indices
+        self.connectivity_coords = connectivity_coords
+        if hierarchy: self.hierarchy = weakref.proxy(hierarchy)
+        self.pf = self.hierarchy.parameter_file  # weakref already
+        self._last_mask = None
+        self._last_count = -1
+        self._last_selector_id = None
+        self._current_particle_type = 'all'
+        self._current_fluid_type = self.pf.default_fluid_type
+
+    def _check_consistency(self):
+        for gi in range(self.connectivity_indices.shape[0]):
+            v = set([])
+            ind = self.connectivity_indices[gi, :] - self._index_offset
+            coords = self.connectivity_coords[ind, :]
+            for i in range(3):
+                assert(np.unique(coords[:,i]).size == 2)
+        mylog.debug("Connectivity is consistent.")
+
+    def __repr__(self):
+        return "SemiStructuredMesh_%04i" % (self.mesh_id)
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+
+        """
+        raise NotImplementedError
+
+    def convert(self, datatype):
+        """
+        This will attempt to convert a given unit to cgs from code units. It
+        either returns the multiplicative factor or throws a KeyError.
+
+        """
+        return self.pf[datatype]
+
+    @property
+    def shape(self):
+        raise NotImplementedError
+
+    def _generate_container_field(self, field):
+        if self._current_chunk is None:
+            self.hierarchy._identify_base_chunk(self)
+        if field == "dx":
+            return self._current_chunk.fwidth[:,0]
+        elif field == "dy":
+            return self._current_chunk.fwidth[:,1]
+        elif field == "dz":
+            return self._current_chunk.fwidth[:,2]
+
+    def select_icoords(self, dobj):
+        raise NotImplementedError
+
+    def select_fcoords(self, dobj = None):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,3), dtype='float64')
+        ind = self.connectivity_indices - self._index_offset
+        centers = self.connectivity_coords[ind, :].sum(axis=1)
+        np.divide(centers, 8, centers)
+        return centers[mask, :]
+
+    def select_fwidth(self, dobj):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty((0,3), dtype='float64')
+        ind = self.connectivity_indices - self._index_offset
+        LE = self.connectivity_coords[ind, :].min(axis=1)
+        RE = self.connectivity_coords[ind, :].max(axis=1)
+        return (RE - LE)[mask, :]
+
+    def select_ires(self, dobj):
+        ind = np.zeros(self.connectivity_indices.shape[0])
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty(0, dtype='int32')
+        return ind[mask]
+
+    def tcoords(self, dobj):
+        dt, t = dobj.selector.get_dt(self)
+        return dt, t
+
+    def deposit(self, positions, fields = None, method = None):
+        raise NotImplementedError
+        # Here we perform our particle deposition.
+        cls = getattr(particle_deposit, "deposit_%s" % method, None)
+        if cls is None:
+            raise YTParticleDepositionNotImplemented(method)
+        op = cls(self.ActiveDimensions.prod()) # We allocate number of zones, not number of octs
+        op.initialize()
+        op.process_grid(self, positions, fields)
+        vals = op.finalize()
+        if vals is None: return
+        return vals.reshape(self.ActiveDimensions, order="C")
+
+    def select_blocks(self, selector):
+        mask = self._get_selector_mask(selector)
+        yield self, mask
+
+    def _get_selector_mask(self, selector):
+        if hash(selector) == self._last_selector_id:
+            mask = self._last_mask
+        else:
+            self._last_mask = mask = selector.fill_mesh_mask(self)
+            self._last_selector_id = hash(selector)
+            if mask is None:
+                self._last_count = 0
+            else:
+                self._last_count = mask.sum()
+        return mask
+
+    def select(self, selector, source, dest, offset):
+        mask = self._get_selector_mask(selector)
+        count = self.count(selector)
+        if count == 0: return 0
+        dest[offset:offset+count] = source[mask]
+        return count
+
+    def count(self, selector):
+        mask = self._get_selector_mask(selector)
+        if mask is None: return 0
+        return self._last_count
+
+    def count_particles(self, selector, x, y, z):
+        # We don't cache the selector results
+        count = selector.count_points(x,y,z)
+        return count
+
+    def select_particles(self, selector, x, y, z):
+        mask = selector.select_points(x,y,z)
+        return mask

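SemiStructuredMesh derives cell centers and widths by reducing over the eight corner nodes of each Hex8 element, the pattern in select_fcoords and select_fwidth above. A self-contained sketch with one synthetic unit-cube element (offset 0; all data invented):

    import numpy as np

    # Eight corner nodes of the unit cube, forming one Hex8 element.
    connectivity_coords = np.array(
        [[x, y, z] for x in (0., 1.) for y in (0., 1.) for z in (0., 1.)])
    connectivity_indices = np.arange(8).reshape(1, 8)   # _index_offset == 0

    corners = connectivity_coords[connectivity_indices, :]  # (1, 8, 3)
    centers = corners.sum(axis=1) / 8.0                      # select_fcoords
    widths = corners.max(axis=1) - corners.min(axis=1)       # select_fwidth
    print(centers)   # [[0.5 0.5 0.5]]
    print(widths)    # [[1. 1. 1.]]
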
diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/frontends/moab/api.py
--- a/yt/frontends/moab/api.py
+++ b/yt/frontends/moab/api.py
@@ -14,7 +14,7 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      MoabHex8Grid, \
+      MoabHex8Mesh, \
       MoabHex8Hierarchy, \
       MoabHex8StaticOutput
 

diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -17,10 +17,11 @@
 import numpy as np
 import weakref
 from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.geometry.grid_geometry_handler import \
-           GridGeometryHandler
+from yt.data_objects.unstructured_mesh import \
+           SemiStructuredMesh
+from yt.geometry.unstructured_mesh_handler import \
+           UnstructuredGeometryHandler
+from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
            StaticOutput
 from yt.utilities.lib import \
@@ -40,149 +41,69 @@
         return data.convert(fname)
     return _conv
 
-class MoabHex8Grid(AMRGridPatch):
-    _id_offset = 0
-    def __init__(self, id, hierarchy, level, start, dimensions):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
-                              hierarchy = hierarchy)
-        self.Parent = []
-        self.Children = []
-        self.Level = level
-        self.start_index = start.copy()
-        self.stop_index = self.start_index + dimensions
-        self.ActiveDimensions = dimensions.copy()
+class MoabHex8Mesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 1
 
-    def _setup_dx(self):
-        # So first we figure out what the index is.  We don't assume
-        # that dx=dy=dz , at least here.  We probably do elsewhere.
-        id = self.id - self._id_offset
-        if len(self.Parent) > 0:
-            self.dds = self.Parent[0].dds / self.pf.refine_by
-        else:
-            LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
-            self.dds = np.array((RE-LE)/self.ActiveDimensions)
-        if self.pf.dimensionality < 2: self.dds[1] = 1.0
-        if self.pf.dimensionality < 3: self.dds[2] = 1.0
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    @property
-    def filename(self):
-        return None
-
-class MoabHex8Hierarchy(GridGeometryHandler):
-
-    grid = MoabHex8Grid
+class MoabHex8Hierarchy(UnstructuredGeometryHandler):
 
     def __init__(self, pf, data_style='h5m'):
         self.parameter_file = weakref.proxy(pf)
         self.data_style = data_style
-        self.max_level = 10  # FIXME
         # for now, the hierarchy file is the parameter file!
         self.hierarchy_filename = self.parameter_file.parameter_filename
         self.directory = os.path.dirname(self.hierarchy_filename)
         self._fhandle = h5py.File(self.hierarchy_filename,'r')
-        GridGeometryHandler.__init__(self,pf,data_style)
+
+        UnstructuredGeometryHandler.__init__(self, pf, data_style)
 
         self._fhandle.close()
 
-    def _initialize_data_storage(self):
-        pass
+    def _initialize_mesh(self):
+        con = self._fhandle["/tstt/elements/Hex8/connectivity"][:]
+        con = np.array(con, dtype="int64")
+        coords = self._fhandle["/tstt/nodes/coordinates"][:]
+        coords = np.array(coords, dtype="float64")
+        self.meshes = [MoabHex8Mesh(0, self.hierarchy_filename, con,
+                                    coords, self)]
 
     def _detect_fields(self):
-        self.field_list = self._fhandle['field_types'].keys()
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        GridGeometryHandler._setup_classes(self, dd)
-        self.object_types.sort()
+        self.field_list = self._fhandle['/tstt/elements/Hex8/tags'].keys()
 
     def _count_grids(self):
         self.num_grids = 1 #self._fhandle['/grid_parent_id'].shape[0]
 
-    def _parse_hierarchy(self):
-        f = self._fhandle
-        dxs = []
-        self.grids = np.empty(self.num_grids, dtype='object')
-        levels = [0]
-        glis = (f['grid_left_index'][:]).copy()
-        gdims = (f['grid_dimensions'][:]).copy()
-        active_dims = ~((np.max(gdims, axis=0) == 1) &
-                        (self.parameter_file.domain_dimensions == 1))
-
-        for i in range(levels.shape[0]):
-            self.grids[i] = self.grid(i, self, levels[i],
-                                      glis[i],
-                                      gdims[i])
-            self.grids[i]._level_id = levels[i]
-
-            dx = (self.parameter_file.domain_right_edge-
-                  self.parameter_file.domain_left_edge)/self.parameter_file.domain_dimensions
-            dx[active_dims] = dx[active_dims]/self.parameter_file.refine_by**(levels[i])
-            dxs.append(dx)
-        dx = np.array(dxs)
-        self.grid_left_edge = self.parameter_file.domain_left_edge + dx*glis
-        self.grid_dimensions = gdims.astype("int32")
-        self.grid_right_edge = self.grid_left_edge + dx*self.grid_dimensions
-        self.grid_particle_count = f['grid_particle_count'][:]
-        del levels, glis, gdims
-
-    def _populate_grid_objects(self):
-        mask = np.empty(self.grids.size, dtype='int32')
-        for gi, g in enumerate(self.grids):
-            g._prepare_grid()
-            g._setup_dx()
-
-        for gi, g in enumerate(self.grids):
-            g.Children = self._get_grid_children(g)
-            for g1 in g.Children:
-                g1.Parent.append(g)
-            get_box_grids_level(self.grid_left_edge[gi,:],
-                                self.grid_right_edge[gi,:],
-                                self.grid_levels[gi],
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            m = mask.astype("bool")
-            m[gi] = False
-            siblings = self.grids[gi:][m[gi:]]
-            if len(siblings) > 0:
-                g.OverlappingSiblings = siblings.tolist()
-        self.max_level = self.grid_levels.max()
-
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-    def _get_box_grids(self, left_edge, right_edge):
-        '''
-        Gets back all the grids between a left edge and right edge
-        '''
-        eps = np.finfo(np.float64).eps
-        grid_i = np.where((np.all((self.grid_right_edge - left_edge) > eps, axis=1) \
-                        &  np.all((right_edge - self.grid_left_edge) > eps, axis=1)) == True)
-
-        return self.grids[grid_i], grid_i
-
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self._get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        return [g for g in self.grids[mask] if g.Level == grid.Level + 1]
-
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
+    def _chunk_all(self, dobj, cache = True):
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", gobjs, None, cache)
+        
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        raise NotImplementedError
+
+    def _chunk_io(self, dobj, cache = True):
+        gfiles = defaultdict(list)
+        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for g in gobjs:
+            gfiles[g.filename].append(g)
+        for fn in sorted(gfiles):
+            gs = gfiles[fn]
+            yield YTDataChunk(dobj, "io", gs, None, cache = cache)
 
 class MoabHex8StaticOutput(StaticOutput):
     _hierarchy_class = MoabHex8Hierarchy
     _fieldinfo_fallback = MoabFieldInfo
     _fieldinfo_known = KnownMoabFields
+    periodicity = (False, False, False)
 
-    def __init__(self, filename, data_style='grid_data_format',
+    def __init__(self, filename, data_style='moab_hex8',
                  storage_filename = None):
         StaticOutput.__init__(self, filename, data_style)
         self.storage_filename = storage_filename
         self.filename = filename
+        self._handle = h5py.File(self.parameter_filename, "r")
 
     def _set_units(self):
         """Generates the conversion to various physical _units based on the parameter file
@@ -200,26 +121,6 @@
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
 
-        # This should be improved.
-        self._handle = h5py.File(self.parameter_filename, "r")
-        """\
-        for field_name in self._handle["/field_types"]:
-            current_field = self._handle["/field_types/%s" % field_name]
-            if 'field_to_cgs' in current_field.attrs:
-                self.units[field_name] = current_field.attrs['field_to_cgs']
-            else:
-                self.units[field_name] = 1.0
-            if 'field_units' in current_field.attrs:
-                current_fields_unit = just_one(current_field.attrs['field_units'])
-            else:
-                current_fields_unit = ""
-            self._fieldinfo_known.add_field(field_name, function=NullFunc, take_log=False,
-                   units=current_fields_unit, projected_units="",
-                   convert_function=_get_convert(field_name))
-        """
-        self._handle.close()
-        del self._handle
-
     def _parse_parameter_file(self):
         self._handle = f = h5py.File(self.parameter_filename, "r")
         coords = self._handle["/tstt/nodes/coordinates"]
@@ -232,17 +133,11 @@
         self.unique_identifier = self.parameter_filename
         self.cosmological_simulation = False
         self.num_ghost_zones = 0
-        #self.field_ordering = sp["field_ordering"]
-        #self.boundary_conditions = sp["boundary_conditions"][:]
-        #p = [bnd == 0 for bnd in self.boundary_conditions[::2]]
-        #self.periodicity = ensure_tuple(p)
         self.current_redshift = self.omega_lambda = self.omega_matter \
                               = self.hubble_constant \
                               = self.cosmological_simulation = 0.0
         self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
         self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
-        self._handle.close()
-        del self._handle
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
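
The new _initialize_mesh needs only two datasets from the .h5m file. A hedged standalone version of that read path, assuming nothing beyond h5py, NumPy, and the dataset paths shown in the hunk (a real MOAB file is of course required to run it):

    import h5py
    import numpy as np

    def read_hex8_mesh(filename):
        # Dataset paths come from _initialize_mesh above; everything else
        # about the file layout is an assumption.
        with h5py.File(filename, "r") as f:
            con = np.asarray(f["/tstt/elements/Hex8/connectivity"][:],
                             dtype="int64")
            coords = np.asarray(f["/tstt/nodes/coordinates"][:],
                                dtype="float64")
        return con, coords

    # con is (n_elements, 8) node indices (1-based, hence the class's
    # _index_offset = 1); coords is (n_nodes, 3) vertex positions.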

diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -22,13 +22,10 @@
 
 # TODO all particle bits were removed
 class IOHandlerMoabH5MHex8(BaseIOHandler):
-    _data_style = "h5m"
-    _offset_string = 'data:offsets=0'
-    _data_string = 'data:datatype=0'
+    _data_style = "moab_hex8"
 
     def __init__(self, pf, *args, **kwargs):
         # TODO check if _num_per_stride is needed
-        self._num_per_stride = kwargs.pop("num_per_stride", 1000000)
         BaseIOHandler.__init__(self, *args, **kwargs)
         self.pf = pf
         self._handle = pf._handle

diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -322,6 +322,35 @@
                 rel += self.domain_width[d]
         return rel
 
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    #@cython.cdivision(True)
+    def fill_mesh_mask(self, mesh):
+        cdef int dim[3]
+        cdef np.float64_t pos[3]
+        cdef np.ndarray[np.int64_t, ndim=2] indices
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        cdef int i, j, k, selected
+        cdef int npoints, nv = mesh._connectivity_length
+        cdef int total = 0
+        cdef int offset = mesh._index_offset
+        coords = mesh.connectivity_coords
+        indices = mesh.connectivity_indices
+        npoints = indices.shape[0]
+        mask = np.zeros(npoints, dtype='uint8')
+        for i in range(npoints):
+            selected = 0
+            for j in range(nv):
+                for k in range(3):
+                    pos[k] = coords[indices[i, j] - offset, k]
+                selected = self.select_point(pos)
+                if selected == 1: break
+            total += selected
+            mask[i] = selected
+        if total == 0: return None
+        return mask.astype("bool")
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
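
fill_mesh_mask marks an element as selected as soon as any of its corner nodes passes the selector's point test. The same logic in plain Python, with select_point stubbed in as a hypothetical predicate standing in for the Cython SelectorObject.select_point:

    import numpy as np

    def fill_mesh_mask_py(coords, indices, offset, select_point):
        # select_point(pos) -> bool is a stand-in for the Cython method;
        # the loop mirrors the .pyx version, short-circuiting per element.
        mask = np.zeros(indices.shape[0], dtype=bool)
        for i in range(indices.shape[0]):
            for j in range(indices.shape[1]):
                if select_point(coords[indices[i, j] - offset]):
                    mask[i] = True
                    break
        return mask if mask.any() else None  # None when nothing is selected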

diff -r ee7cb8433611eff976bd35e023525c9a51635696 -r 343586add2ead5411267619069aac3c1601d62b1 yt/geometry/unstructured_mesh_handler.py
--- /dev/null
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -0,0 +1,99 @@
+"""
+Unstructured-mesh geometry handler
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import h5py
+import numpy as na
+import string, re, gc, time, cPickle
+import weakref
+
+from itertools import chain, izip
+
+from yt.funcs import *
+from yt.utilities.logger import ytLogger as mylog
+from yt.arraytypes import blankRecordArray
+from yt.config import ytcfg
+from yt.data_objects.field_info_container import NullFunc
+from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
+from yt.geometry.particle_oct_container import \
+    ParticleOctreeContainer, ParticleRegions
+from yt.utilities.definitions import MAXLEVEL
+from yt.utilities.io_handler import io_registry
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    ParallelAnalysisInterface, parallel_splitter
+
+from yt.data_objects.data_containers import data_object_registry
+from yt.data_objects.octree_subset import ParticleOctreeSubset
+
+class UnstructuredGeometryHandler(GeometryHandler):
+    _global_mesh = False
+
+    def __init__(self, pf, data_style):
+        self.data_style = data_style
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.float_type = np.float64
+        super(UnstructuredGeometryHandler, self).__init__(pf, data_style)
+
+    def _setup_geometry(self):
+        mylog.debug("Initializing Unstructured Mesh Geometry Handler.")
+        self._initialize_mesh()
+
+    def get_smallest_dx(self):
+        """
+        Returns (in code units) the smallest cell size in the simulation.
+        """
+        raise NotImplementedError
+
+    def convert(self, unit):
+        return self.parameter_file.conversion_factors[unit]
+
+    def _initialize_mesh(self):
+        raise NotImplementedError
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        super(UnstructuredGeometryHandler, self)._setup_classes(dd)
+        self.object_types.sort()
+
+    def _identify_base_chunk(self, dobj):
+        if getattr(dobj, "_chunk_info", None) is None:
+            dobj._chunk_info = self.meshes
+        dobj._current_chunk = list(self._chunk_all(dobj))[0]
+
+    def _chunk_all(self, dobj):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        yield YTDataChunk(dobj, "all", oobjs, None)
+
+    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
+        sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        # We actually do not really use the data files except as input to the
+        # ParticleOctreeSubset.
+        # This is where we will perform cutting of the Octree and
+        # load-balancing.  That may require a specialized selector object to
+        # cut based on some space-filling curve index.
+        for i,og in enumerate(sobjs):
+            if ngz > 0:
+                g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
+            else:
+                g = og
+            yield YTDataChunk(dobj, "spatial", [g])
+
+    def _chunk_io(self, dobj, cache = True):
+        oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
+        for subset in oobjs:
+            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)

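The handler keeps the base-chunk protocol of the grid geometry handler: _identify_base_chunk attaches the mesh list as _chunk_info, and the _chunk_* generators wrap it in YTDataChunk objects, one per mesh for IO. A toy sketch of that generator pattern (Chunk and the mesh strings are stand-ins, not yt classes):

    # Toy version of the chunking generators above; nothing here is yt API.
    class Chunk(object):
        def __init__(self, kind, objs):
            self.kind, self.objs = kind, objs

    def chunk_io(meshes):
        # one chunk per mesh, as in UnstructuredGeometryHandler._chunk_io
        for m in meshes:
            yield Chunk("io", [m])

    for chunk in chunk_io(["mesh0", "mesh1"]):
        print(chunk.kind, chunk.objs)   # io ['mesh0'] / io ['mesh1']
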

https://bitbucket.org/yt_analysis/yt-3.0/commits/ade8302ae6ef/
Changeset:   ade8302ae6ef
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-16 23:46:52
Summary:     Adding some IO handling.
Affected #:  1 file

diff -r 343586add2ead5411267619069aac3c1601d62b1 -r ade8302ae6ef70a2792c3fb934961bf728529ece yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -19,7 +19,6 @@
 def field_dname(field_name):
     return "/tstt/elements/Hex8/tags/{0}".format(field_name)
 
-
 # TODO all particle bits were removed
 class IOHandlerMoabH5MHex8(BaseIOHandler):
     _data_style = "moab_hex8"
@@ -32,6 +31,7 @@
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         chunks = list(chunks)
+        assert(len(chunks) == 1)
         fhandle = self._handle
         rv = {}
         for field in fields:
@@ -40,10 +40,11 @@
         ngrids = sum(len(chunk.objs) for chunk in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [fname for ftype, fname in fields], ngrids)
-        x, y, z = fhandle['/tstt/nodes/coordinates'][:].T
-        mask = selector.select_points(x, y, z)
         for field in fields:
             ftype, fname = field
-            data = fhandle[field_dname(fname)][mask]
-            rv[field][:] = data
+            ds = np.array(fhandle[field_dname(fname)][:], dtype="float64")
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
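
The rewritten _read_fluid_selection reads each field dataset whole and lets every mesh copy its own selected cells into the preallocated rv[field] buffer, advancing a running offset via ind += g.select(selector, ds, rv[field], ind). The contract of that select call, reduced to a boolean-mask sketch (all names and data invented):

    import numpy as np

    def select(mask, source, dest, offset):
        # mirrors SemiStructuredMesh.select: copy the selected values into
        # dest starting at offset and report how many were written
        count = int(mask.sum())
        if count == 0:
            return 0
        dest[offset:offset + count] = source[mask]
        return count

    source = np.arange(10, dtype="float64")
    mask = source % 2 == 0                        # hypothetical selection
    dest = np.empty(int(mask.sum()), dtype="float64")
    ind = 0
    ind += select(mask, source, dest, ind)
    print(dest)   # [0. 2. 4. 6. 8.]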


https://bitbucket.org/yt_analysis/yt-3.0/commits/8450e04a1c7b/
Changeset:   8450e04a1c7b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 00:04:36
Summary:     Semi-structured mesh works sort of.
Affected #:  5 files

diff -r ade8302ae6ef70a2792c3fb934961bf728529ece -r 8450e04a1c7bae94417101b4b5a910f8be260f77 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -157,7 +157,7 @@
         if hash(selector) == self._last_selector_id:
             mask = self._last_mask
         else:
-            self._last_mask = mask = selector.fill_mesh_mask(self)
+            self._last_mask = mask = selector.fill_mesh_cell_mask(self)
             self._last_selector_id = hash(selector)
             if mask is None:
                 self._last_count = 0

diff -r ade8302ae6ef70a2792c3fb934961bf728529ece -r 8450e04a1c7bae94417101b4b5a910f8be260f77 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -76,22 +76,6 @@
     def _setup_data_io(self):
         self.io = io_registry[self.data_style](self.parameter_file)
 
-    def _chunk_all(self, dobj, cache = True):
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", gobjs, None, cache)
-        
-    def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
-        raise NotImplementedError
-
-    def _chunk_io(self, dobj, cache = True):
-        gfiles = defaultdict(list)
-        gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        for g in gobjs:
-            gfiles[g.filename].append(g)
-        for fn in sorted(gfiles):
-            gs = gfiles[fn]
-            yield YTDataChunk(dobj, "io", gs, None, cache = cache)
-
 class MoabHex8StaticOutput(StaticOutput):
     _hierarchy_class = MoabHex8Hierarchy
     _fieldinfo_fallback = MoabFieldInfo

diff -r ade8302ae6ef70a2792c3fb934961bf728529ece -r 8450e04a1c7bae94417101b4b5a910f8be260f77 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -30,6 +30,7 @@
         self._handle = pf._handle
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
+        assert size
         chunks = list(chunks)
         assert(len(chunks) == 1)
         fhandle = self._handle

diff -r ade8302ae6ef70a2792c3fb934961bf728529ece -r 8450e04a1c7bae94417101b4b5a910f8be260f77 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -18,7 +18,7 @@
 cimport numpy as np
 cimport cython
 from libc.stdlib cimport malloc, free
-from fp_utils cimport fclip, iclip
+from fp_utils cimport fclip, iclip, fmax, fmin
 from selection_routines cimport SelectorObject
 from oct_container cimport OctreeContainer, OctAllocationContainer, Oct
 cimport oct_visitors
@@ -322,9 +322,9 @@
                 rel += self.domain_width[d]
         return rel
 
-    #@cython.boundscheck(False)
-    #@cython.wraparound(False)
-    #@cython.cdivision(True)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_mesh_mask(self, mesh):
         cdef int dim[3]
         cdef np.float64_t pos[3]
@@ -351,6 +351,38 @@
         if total == 0: return None
         return mask.astype("bool")
 
+    def fill_mesh_cell_mask(self, mesh):
+        cdef int dim[3]
+        cdef np.float64_t pos, le[3], re[3]
+        cdef np.ndarray[np.int64_t, ndim=2] indices
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        cdef np.ndarray[np.uint8_t, ndim=1] mask
+        cdef int i, j, k, selected
+        cdef int npoints, nv = mesh._connectivity_length
+        cdef int total = 0
+        cdef int offset = mesh._index_offset
+        if nv != 8:
+            raise RuntimeError
+        coords = mesh.connectivity_coords
+        indices = mesh.connectivity_indices
+        npoints = indices.shape[0]
+        mask = np.zeros(npoints, dtype='uint8')
+        for i in range(npoints):
+            selected = 0
+            for k in range(3):
+                le[k] = 1e60
+                re[k] = -1e60
+            for j in range(nv):
+                for k in range(3):
+                    pos = coords[indices[i, j] - offset, k]
+                    le[k] = fmin(pos, le[k])
+                    re[k] = fmax(pos, re[k])
+            selected = self.select_bbox(le, re)
+            total += selected
+            mask[i] = selected
+        if total == 0: return None
+        return mask.astype("bool")
+
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)

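fill_mesh_cell_mask selects a hexahedral cell whenever the axis-aligned
bounding box of its eight vertices is selected, rather than testing vertices
as points. A NumPy sketch of the same computation, with select_bbox standing
in for the selector's select_bbox method:

    import numpy as np

    def mesh_cell_mask(coords, indices, offset, select_bbox):
        # Bounding box of each cell's 8 vertices, then one selector test
        # per cell; return None when nothing is selected, as the Cython does.
        verts = coords[indices - offset]            # (ncells, 8, 3)
        le = verts.min(axis=1)                      # per-cell left edges
        re = verts.max(axis=1)                      # per-cell right edges
        mask = np.array([select_bbox(l, r) for l, r in zip(le, re)],
                        dtype=bool)
        return mask if mask.any() else None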
diff -r ade8302ae6ef70a2792c3fb934961bf728529ece -r 8450e04a1c7bae94417101b4b5a910f8be260f77 yt/geometry/unstructured_mesh_handler.py
--- a/yt/geometry/unstructured_mesh_handler.py
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -73,11 +73,18 @@
     def _identify_base_chunk(self, dobj):
         if getattr(dobj, "_chunk_info", None) is None:
             dobj._chunk_info = self.meshes
+        if getattr(dobj, "size", None) is None:
+            dobj.size = self._count_selection(dobj)
         dobj._current_chunk = list(self._chunk_all(dobj))[0]
 
-    def _chunk_all(self, dobj):
+    def _count_selection(self, dobj, meshes = None):
+        if meshes is None: meshes = dobj._chunk_info
+        count = sum((m.count(dobj.selector) for m in meshes))
+        return count
+
+    def _chunk_all(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
-        yield YTDataChunk(dobj, "all", oobjs, None)
+        yield YTDataChunk(dobj, "all", oobjs, dobj.size, cache)
 
     def _chunk_spatial(self, dobj, ngz, sort = None, preload_fields = None):
         sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -91,9 +98,12 @@
                 g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
             else:
                 g = og
-            yield YTDataChunk(dobj, "spatial", [g])
+            size = self._count_selection(dobj, [og])
+            if size == 0: continue
+            yield YTDataChunk(dobj, "spatial", [g], size)
 
     def _chunk_io(self, dobj, cache = True):
         oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
         for subset in oobjs:
-            yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
+            s = self._count_selection(dobj, oobjs)
+            yield YTDataChunk(dobj, "io", [subset], s, cache = cache)

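The handler now sizes chunks up front: _count_selection sums each mesh's
selected-cell count, and the total is handed to YTDataChunk so readers can
preallocate. A toy version of the counting, with count() standing in for
the mesh's selector-based count:

    class ToyMesh(object):
        def __init__(self, ncells):
            self.ncells = ncells

        def count(self, selector):
            # stand-in for SemiStructuredMesh.count(selector)
            return sum(1 for i in range(self.ncells) if selector(i))

    def count_selection(selector, meshes):
        # total selected cells across meshes; this sizes each YTDataChunk
        return sum(m.count(selector) for m in meshes)

    meshes = [ToyMesh(10), ToyMesh(6)]
    print(count_selection(lambda i: i % 2 == 0, meshes))  # 5 + 3 = 8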

https://bitbucket.org/yt_analysis/yt-3.0/commits/4eb5fe6b03ec/
Changeset:   4eb5fe6b03ec
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 20:33:32
Summary:     Move fill_fcoords and fill_fwidths to Cython for semi-structured mesh.
Affected #:  6 files

diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -21,6 +21,8 @@
 
 from yt.funcs import *
 from yt.utilities.definitions import x_dict, y_dict
+from yt.utilities.lib import \
+    fill_fcoords, fill_fwidths
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
@@ -113,18 +115,18 @@
     def select_fcoords(self, dobj = None):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
-        ind = self.connectivity_indices - self._index_offset
-        centers = self.connectivity_coords[ind, :].sum(axis=1)
-        np.divide(centers, 8, centers)
+        centers = fill_fcoords(self.connectivity_coords,
+                               self.connectivity_indices,
+                               self._index_offset)
         return centers[mask, :]
 
     def select_fwidth(self, dobj):
         mask = self._get_selector_mask(dobj.selector)
         if mask is None: return np.empty((0,3), dtype='float64')
-        ind = self.connectivity_indices - self._index_offset
-        LE = self.connectivity_coords[ind, :].min(axis=1)
-        RE = self.connectivity_coords[ind, :].max(axis=1)
-        return (RE - LE)[mask, :]
+        widths = fill_fwidths(self.connectivity_coords,
+                              self.connectivity_indices,
+                              self._index_offset)
+        return widths[mask, :]
 
     def select_ires(self, dobj):
         ind = np.zeros(self.connectivity_indices.shape[0])

diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -61,9 +61,9 @@
 
     def _initialize_mesh(self):
         con = self._fhandle["/tstt/elements/Hex8/connectivity"][:]
-        con = np.array(con, dtype="int64")
+        con = np.asarray(con, dtype="int64")
         coords = self._fhandle["/tstt/nodes/coordinates"][:]
-        coords = np.array(coords, dtype="float64")
+        coords = np.asarray(coords, dtype="float64")
         self.meshes = [MoabHex8Mesh(0, self.hierarchy_filename, con,
                                     coords, self)]
 

diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -351,6 +351,9 @@
         if total == 0: return None
         return mask.astype("bool")
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def fill_mesh_cell_mask(self, mesh):
         cdef int dim[3]
         cdef np.float64_t pos, le[3], re[3]

diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -29,3 +29,4 @@
 from .marching_cubes import *
 from .GridTree import *
 from .write_array import *
+from .mesh_utilities import *

diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/utilities/lib/mesh_utilities.pyx
--- /dev/null
+++ b/yt/utilities/lib/mesh_utilities.pyx
@@ -0,0 +1,72 @@
+"""
+Utilities for unstructured and semi-structured meshes
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from libc.stdlib cimport malloc, free, abs
+from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def fill_fcoords(np.ndarray[np.float64_t, ndim=2] coords,
+                 np.ndarray[np.int64_t, ndim=2] indices,
+                 int offset = 0):
+    cdef np.ndarray[np.float64_t, ndim=2] fcoords
+    cdef int nc = indices.shape[0]
+    cdef int nv = indices.shape[1]
+    cdef np.float64_t pos[3]
+    cdef int i, j, k
+    fcoords = np.empty((nc, 3), dtype="float64")
+    for i in range(nc):
+        for j in range(3):
+            pos[j] = 0.0
+        for j in range(nv):
+            for k in range(3):
+                pos[k] += coords[indices[i, j] - offset, k]
+        for j in range(3):
+            pos[j] /= nv
+            fcoords[i, j] = pos[j]
+    return fcoords
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def fill_fwidths(np.ndarray[np.float64_t, ndim=2] coords,
+                 np.ndarray[np.int64_t, ndim=2] indices,
+                 int offset = 0):
+    cdef np.ndarray[np.float64_t, ndim=2] fwidths
+    cdef int nc = indices.shape[0]
+    cdef int nv = indices.shape[1]
+    if nv != 8:
+        raise NotImplementedError
+    cdef np.float64_t LE[3], RE[3]
+    cdef int i, j, k
+    cdef np.float64_t pos
+    fwidths = np.empty((nc, 3), dtype="float64")
+    for i in range(nc):
+        for j in range(3):
+            LE[j] = 1e60
+            RE[j] = -1e60
+        for j in range(nv):
+            for k in range(3):
+                pos = coords[indices[i, j] - offset, k]
+                LE[k] = fmin(pos, LE[k])
+                RE[k] = fmax(pos, RE[k])
+        for j in range(3):
+            fwidths[i, j] = RE[j] - LE[j]
+    return fwidths
+

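The two kernels reproduce what the removed Python in select_fcoords and
select_fwidth computed, but in tight C loops: cell centers as the mean of
each cell's vertex coordinates, and widths as the extent of the per-cell
bounding box. A NumPy sketch, useful as a cross-check against the Cython:

    import numpy as np

    def fill_fcoords_np(coords, indices, offset=0):
        # cell centers: mean of each cell's vertex coordinates
        return coords[indices - offset].mean(axis=1)

    def fill_fwidths_np(coords, indices, offset=0):
        # cell widths: extent of each cell's vertex bounding box
        verts = coords[indices - offset]
        return verts.max(axis=1) - verts.min(axis=1)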
diff -r 8450e04a1c7bae94417101b4b5a910f8be260f77 -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -141,6 +141,13 @@
                           "yt/utilities/lib/FixedInterpolator.h",
                           "yt/utilities/lib/kdtree.h"],
           )
+    config.add_extension("mesh_utilities",
+              ["yt/utilities/lib/mesh_utilities.pyx"],
+               include_dirs=["yt/utilities/lib/"],
+               libraries=["m"], 
+               depends = ["yt/utilities/lib/fp_utils.pxd",
+                          ],
+          )
     config.add_extension("grid_traversal", 
                ["yt/utilities/lib/grid_traversal.pyx",
                 "yt/utilities/lib/FixedInterpolator.c",


https://bitbucket.org/yt_analysis/yt-3.0/commits/1243ac095c3f/
Changeset:   1243ac095c3f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 23:17:11
Summary:     Fixing corner cases for walk_volume.
Affected #:  1 file

diff -r 4eb5fe6b03ec9978a346c66385e6253e41b3bf4a -r 1243ac095c3f943cf62caf285fc486b452529d08 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -954,6 +954,14 @@
             tl = (vc.right_edge[i] - v_pos[i])*iv_dir[i]
         temp_x = (v_pos[x] + tl*v_dir[x])
         temp_y = (v_pos[y] + tl*v_dir[y])
+        if fabs(temp_x - vc.left_edge[x]) < 1e-10*vc.dds[x]:
+            temp_x = vc.left_edge[x]
+        elif fabs(temp_x - vc.right_edge[x]) < 1e-10*vc.dds[x]:
+            temp_x = vc.right_edge[x]
+        if fabs(temp_y - vc.left_edge[y]) < 1e-10*vc.dds[y]:
+            temp_y = vc.left_edge[y]
+        elif fabs(temp_y - vc.right_edge[y]) < 1e-10*vc.dds[y]:
+            temp_y = vc.right_edge[y]
         if vc.left_edge[x] <= temp_x and temp_x <= vc.right_edge[x] and \
            vc.left_edge[y] <= temp_y and temp_y <= vc.right_edge[y] and \
            0.0 <= tl and tl < intersect_t:

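The fix snaps an intersection coordinate that lands within a 1e-10*dds
tolerance of a cell edge exactly onto that edge, so the inclusive bounds
test just below it cannot spuriously reject a ray that grazes a face due to
round-off. The same snap in isolation:

    def snap_to_edge(x, left, right, dx, eps=1e-10):
        # Snap x onto an edge when round-off puts it within eps*dx of one.
        if abs(x - left) < eps * dx:
            return left
        elif abs(x - right) < eps * dx:
            return right
        return x

    print(snap_to_edge(1.0 - 1e-14, 0.0, 1.0, 1.0))  # -> 1.0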

https://bitbucket.org/yt_analysis/yt-3.0/commits/e634fe492111/
Changeset:   e634fe492111
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 23:18:31
Summary:     Enable ray walking for semi-structured mesh.

Also, change select_bbox in selection_routines to utilize walk_volume.
Affected #:  2 files

diff -r 1243ac095c3f943cf62caf285fc486b452529d08 -r e634fe4921110bedfc7a75944e9f6c0c846c6350 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -135,7 +135,9 @@
         return ind[mask]
 
     def tcoords(self, dobj):
-        dt, t = dobj.selector.get_dt(self)
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty(0, dtype='float64')
+        dt, t = dobj.selector.get_dt_mesh(self, mask.sum(), self._index_offset)
         return dt, t
 
     def deposit(self, positions, fields = None, method = None):

diff -r 1243ac095c3f943cf62caf285fc486b452529d08 -r e634fe4921110bedfc7a75944e9f6c0c846c6350 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1121,6 +1121,59 @@
             print ni, ia.hits
         return dtr, tr
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def get_dt_mesh(self, mesh, nz, int offset):
+        cdef np.ndarray[np.float64_t, ndim=3] t, dt
+        cdef np.ndarray[np.float64_t, ndim=1] tr, dtr
+        cdef np.ndarray[np.uint8_t, ndim=3, cast=True] child_mask
+        cdef int i, j, k, ni
+        cdef np.float64_t LE[3], RE[3], pos
+        cdef IntegrationAccumulator ia
+        cdef np.ndarray[np.float64_t, ndim=2] coords
+        cdef np.ndarray[np.int64_t, ndim=2] indices
+        indices = mesh.connectivity_indices
+        coords = mesh.connectivity_coords
+        cdef int nc = indices.shape[0]
+        cdef int nv = indices.shape[1]
+        if nv != 8:
+            raise NotImplementedError
+        cdef VolumeContainer vc
+        cdef int selected
+        child_mask = np.ones((1,1,1), dtype="uint8")
+        t = np.zeros((1,1,1), dtype="float64")
+        dt = np.zeros((1,1,1), dtype="float64") - 1
+        tr = np.zeros(nz, dtype="float64")
+        dtr = np.zeros(nz, dtype="float64")
+        ia.t = <np.float64_t *> t.data
+        ia.dt = <np.float64_t *> dt.data
+        ia.child_mask = <np.uint8_t *> child_mask.data
+        ia.hits = 0
+        ni = 0
+        for i in range(nc):
+            for j in range(3):
+                LE[j] = 1e60
+                RE[j] = -1e60
+            for j in range(nv):
+                for k in range(3):
+                    pos = coords[indices[i, j] - offset, k]
+                    LE[k] = fmin(pos, LE[k])
+                    RE[k] = fmax(pos, RE[k])
+            for j in range(3):
+                vc.left_edge[j] = LE[j]
+                vc.right_edge[j] = RE[j]
+                vc.dds[j] = RE[j] - LE[j]
+                vc.idds[j] = 1.0/vc.dds[j]
+                vc.dims[j] = 1
+            t[0,0,0] = dt[0,0,0] = -1
+            walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+            if dt[0,0,0] >= 0:
+                tr[ni] = t[0,0,0]
+                dtr[ni] = dt[0,0,0]
+                ni += 1
+        return dtr, tr
+
     cdef int select_point(self, np.float64_t pos[3]) nogil:
         # two 0-volume constructs don't intersect
         return 0
@@ -1134,37 +1187,25 @@
     @cython.cdivision(True)
     cdef int select_bbox(self, np.float64_t left_edge[3],
                                np.float64_t right_edge[3]) nogil:
-        cdef int i, ax
-        cdef int i1, i2
-        cdef np.float64_t vs[3], t, v[3]
-
-        # if either point is fully enclosed, we select the bounding box
-        if left_edge[0] <= self.p1[0] <= right_edge[0] and \
-           left_edge[1] <= self.p1[1] <= right_edge[1] and \
-           left_edge[2] <= self.p1[2] <= right_edge[2]:
+        cdef int i
+        cdef np.uint8_t cm = 1
+        cdef VolumeContainer vc
+        cdef IntegrationAccumulator ia
+        cdef np.float64_t dt, t
+        for i in range(3):
+            vc.left_edge[i] = left_edge[i]
+            vc.right_edge[i] = right_edge[i]
+            vc.dds[i] = right_edge[i] - left_edge[i]
+            vc.idds[i] = 1.0/vc.dds[i]
+            vc.dims[i] = 1
+        t = dt = 0.0
+        ia.t = &t
+        ia.dt = &dt
+        ia.child_mask = &cm
+        ia.hits = 0
+        walk_volume(&vc, self.p1, self.vec, dt_sampler, <void*> &ia)
+        if ia.hits > 0:
             return 1
-        if left_edge[0] <= self.p2[0] <= right_edge[0] and \
-           left_edge[1] <= self.p2[1] <= right_edge[1] and \
-           left_edge[2] <= self.p2[2] <= right_edge[2]:
-            return 1
-
-        for ax in range(3):
-            i1 = (ax+1) % 3
-            i2 = (ax+2) % 3
-            t = (left_edge[ax] - self.p1[ax])/self.vec[ax]
-            if 0.0 <= t <= 1.0 :
-                for i in range(3):
-                    vs[i] = t * self.vec[i] + self.p1[i]
-                if left_edge[i1] <= vs[i1] <= right_edge[i1] and \
-                   left_edge[i2] <= vs[i2] <= right_edge[i2] :
-                    return 1
-            t = (right_edge[ax] - self.p1[ax])/self.vec[ax]
-            if 0.0 <= t <= 1.0 :
-                for i in range(3):
-                    vs[i] = t * self.vec[i] + self.p1[i]
-                if left_edge[i1] <= vs[i1] <= right_edge[i1] and \
-                   left_edge[i2] <= vs[i2] <= right_edge[i2] :
-                    return 1
         return 0
 
     def _hash_vals(self):

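select_bbox for rays now builds a one-cell VolumeContainer and selects the
box if walk_volume registers any sampler hits; get_dt_mesh does the same per
hexahedron, accumulating (t, dt) only for cells the ray actually crosses.
walk_volume is yt's internal traversal; as a reference for the decision it
makes, here is a generic slab-method ray/box test (a sketch, not the yt
code):

    def ray_hits_box(p, v, le, re):
        # Slab test: intersect the ray p + t*v with each axis slab and
        # keep the running overlap [t_near, t_far]; a hit requires
        # t_near <= t_far with t_far >= 0.
        t_near, t_far = -float("inf"), float("inf")
        for i in range(3):
            if v[i] == 0.0:
                if not (le[i] <= p[i] <= re[i]):
                    return False
                continue
            t1 = (le[i] - p[i]) / v[i]
            t2 = (re[i] - p[i]) / v[i]
            if t2 < t1:
                t1, t2 = t2, t1
            t_near = max(t_near, t1)
            t_far = min(t_far, t2)
        return t_near <= t_far and t_far >= 0.0

    print(ray_hits_box((0.5, 0.5, -1.0), (0.0, 0.0, 1.0),
                       (0.0, 0.0, 0.0), (1.0, 1.0, 1.0)))  # -> True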

https://bitbucket.org/yt_analysis/yt-3.0/commits/8b67bb360b38/
Changeset:   8b67bb360b38
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 23:52:21
Summary:     Adding get_smallest_dx for semi-structured mesh.
Affected #:  2 files

diff -r e634fe4921110bedfc7a75944e9f6c0c846c6350 -r 8b67bb360b38abcab6ac1d34a800b3da3ac4e3ea yt/geometry/unstructured_mesh_handler.py
--- a/yt/geometry/unstructured_mesh_handler.py
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -31,6 +31,7 @@
     ParticleOctreeContainer, ParticleRegions
 from yt.utilities.definitions import MAXLEVEL
 from yt.utilities.io_handler import io_registry
+from yt.utilities.lib import smallest_fwidth
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     ParallelAnalysisInterface, parallel_splitter
 
@@ -57,7 +58,11 @@
         """
         Returns (in code units) the smallest cell size in the simulation.
         """
-        raise NotImplementedError
+        dx = min(smallest_fwidth(mesh.connectivity_coords,
+                                 mesh.connectivity_indices,
+                                 mesh._index_offset)
+                 for mesh in self.meshes)
+        return dx
 
     def convert(self, unit):
         return self.parameter_file.conversion_factors[unit]

diff -r e634fe4921110bedfc7a75944e9f6c0c846c6350 -r 8b67bb360b38abcab6ac1d34a800b3da3ac4e3ea yt/utilities/lib/mesh_utilities.pyx
--- a/yt/utilities/lib/mesh_utilities.pyx
+++ b/yt/utilities/lib/mesh_utilities.pyx
@@ -70,3 +70,29 @@
             fwidths[i, j] = RE[j] - LE[j]
     return fwidths
 
+@cython.boundscheck(False)
+@cython.wraparound(False)
+@cython.cdivision(True)
+def smallest_fwidth(np.ndarray[np.float64_t, ndim=2] coords,
+                    np.ndarray[np.int64_t, ndim=2] indices,
+                    int offset = 0):
+    cdef np.float64_t fwidth = 1e60
+    cdef int nc = indices.shape[0]
+    cdef int nv = indices.shape[1]
+    if nv != 8:
+        raise NotImplementedError
+    cdef np.float64_t LE[3], RE[3]
+    cdef int i, j, k
+    for i in range(nc):
+        for j in range(3):
+            LE[j] = 1e60
+            RE[j] = -1e60
+        for j in range(nv):
+            for k in range(3):
+                pos = coords[indices[i, j] - offset, k]
+                LE[k] = fmin(pos, LE[k])
+                RE[k] = fmax(pos, RE[k])
+        for j in range(3):
+            fwidth = fmin(fwidth, RE[j] - LE[j])
+    return fwidth
+

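smallest_fwidth gives the handler's get_smallest_dx a meaningful answer for
semi-structured data: the minimum bounding-box width over every cell and
axis. A NumPy cross-check of the kernel:

    import numpy as np

    def smallest_fwidth_np(coords, indices, offset=0):
        # per-cell bounding-box widths, then the global minimum
        verts = coords[indices - offset]                 # (ncells, 8, 3)
        widths = verts.max(axis=1) - verts.min(axis=1)   # (ncells, 3)
        return widths.min()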

https://bitbucket.org/yt_analysis/yt-3.0/commits/cd04eade8fc1/
Changeset:   cd04eade8fc1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-17 23:52:29
Summary:     Adding MoabHex8 tests.
Affected #:  2 files

diff -r 8b67bb360b38abcab6ac1d34a800b3da3ac4e3ea -r cd04eade8fc106fe9d6358d9275142ac10a5a9dd yt/frontends/moab/tests/test_c5.py
--- /dev/null
+++ b/yt/frontends/moab/tests/test_c5.py
@@ -0,0 +1,55 @@
+"""
+Tests of semi-structured meshes in MoabHex8 format.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load, \
+    PixelizedProjectionValuesTest, \
+    FieldValuesTest
+from yt.frontends.moab.api import MoabHex8StaticOutput
+
+_fields = (("gas", "flux"),
+          )
+
+c5 = "c5/c5.h5m"
+@requires_pf(c5)
+def test_cantor_5():
+    np.random.seed(0x4d3d3d3)
+    pf = data_dir_load(c5)
+    yield assert_equal, str(pf), "c5"
+    dso = [ None, ("sphere", ("c", (0.1, 'unitary'))),
+                  ("sphere", ("c", (0.2, 'unitary')))]
+    dd = pf.h.all_data()
+    yield assert_almost_equal, pf.h.get_smallest_dx(), 0.00411522633744843, 10
+    yield assert_equal, dd["x"].shape[0], 63*63*63
+    yield assert_almost_equal, dd["CellVolumeCode"].sum(dtype="float64"), 1.0, 10
+    for offset_1 in [1e-9, 1e-4, 0.1]:
+        for offset_2 in [1e-9, 1e-4, 0.1]:
+            ray = pf.h.ray(pf.domain_left_edge + offset_1,
+                           pf.domain_right_edge - offset_2)
+            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+    for i, p1 in enumerate(np.random.random((5, 3))):
+        for j, p2 in enumerate(np.random.random((5, 3))):
+            ray = pf.h.ray(p1, p2)
+            yield assert_almost_equal, ray["dts"].sum(dtype="float64"), 1.0, 8
+    for field in _fields:
+        for axis in [0, 1, 2]:
+            for ds in dso:
+                yield FieldValuesTest(c5, field, ds)
+


https://bitbucket.org/yt_analysis/yt-3.0/commits/a36d1efcbe5c/
Changeset:   a36d1efcbe5c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 03:12:53
Summary:     Updating as per comments, including reverting setup.py.
Affected #:  4 files

diff -r cd04eade8fc106fe9d6358d9275142ac10a5a9dd -r a36d1efcbe5c9b2881ddfb45b7b78144d8057050 setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
 import subprocess
 import shutil
 import glob
-#import distribute_setup
-#distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+    import distribute_setup
+    distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
 # End snippet
 ######
 
-import setuptools
-
 VERSION = "3.0dev"
 
 if os.path.exists('MANIFEST'):

diff -r cd04eade8fc106fe9d6358d9275142ac10a5a9dd -r a36d1efcbe5c9b2881ddfb45b7b78144d8057050 yt/frontends/moab/__init__.py
--- a/yt/frontends/moab/__init__.py
+++ b/yt/frontends/moab/__init__.py
@@ -0,0 +1,14 @@
+"""
+Empty __init__.py file.
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r cd04eade8fc106fe9d6358d9275142ac10a5a9dd -r a36d1efcbe5c9b2881ddfb45b7b78144d8057050 yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -16,6 +16,7 @@
     config.add_subpackage("gdf")
     config.add_subpackage("maestro")
     config.add_subpackage("moab")
+    config.add_subpackage("moab/tests")
     config.add_subpackage("nyx")
     config.add_subpackage("orion")
     config.add_subpackage("artio")

diff -r cd04eade8fc106fe9d6358d9275142ac10a5a9dd -r a36d1efcbe5c9b2881ddfb45b7b78144d8057050 yt/geometry/unstructured_mesh_handler.py
--- a/yt/geometry/unstructured_mesh_handler.py
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -14,29 +14,12 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import h5py
-import numpy as na
-import string, re, gc, time, cPickle
 import weakref
 
-from itertools import chain, izip
-
 from yt.funcs import *
 from yt.utilities.logger import ytLogger as mylog
-from yt.arraytypes import blankRecordArray
-from yt.config import ytcfg
-from yt.data_objects.field_info_container import NullFunc
 from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
-from yt.geometry.particle_oct_container import \
-    ParticleOctreeContainer, ParticleRegions
-from yt.utilities.definitions import MAXLEVEL
-from yt.utilities.io_handler import io_registry
 from yt.utilities.lib import smallest_fwidth
-from yt.utilities.parallel_tools.parallel_analysis_interface import \
-    ParallelAnalysisInterface, parallel_splitter
-
-from yt.data_objects.data_containers import data_object_registry
-from yt.data_objects.octree_subset import ParticleOctreeSubset
 
 class UnstructuredGeometryHandler(GeometryHandler):
     _global_mesh = False


https://bitbucket.org/yt_analysis/yt-3.0/commits/ff66eee240ba/
Changeset:   ff66eee240ba
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 14:27:51
Summary:     Addressing comments from Kacper.
Affected #:  1 file

diff -r a36d1efcbe5c9b2881ddfb45b7b78144d8057050 -r ff66eee240bafd01764245b6402094aafe56cdde yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -13,29 +13,17 @@
 # The full license is in the file COPYING.txt, distributed with this software.
 #-----------------------------------------------------------------------------
 
-import exceptions
-import pdb
 import weakref
-import itertools
 import numpy as np
 
-from yt.funcs import *
+from yt.funcs import mylog
 from yt.utilities.definitions import x_dict, y_dict
 from yt.utilities.lib import \
     fill_fcoords, fill_fwidths
 
 from yt.data_objects.data_containers import \
     YTFieldData, \
-    YTDataContainer, \
     YTSelectionContainer
-from yt.utilities.definitions import x_dict, y_dict
-from .field_info_container import \
-    NeedsGridType, \
-    NeedsOriginalGrid, \
-    NeedsDataField, \
-    NeedsProperty, \
-    NeedsParameter
-from yt.geometry.selection_routines import convert_mask_to_indices
 import yt.geometry.particle_deposit as particle_deposit
 
 class SemiStructuredMesh(YTSelectionContainer):
@@ -69,7 +57,6 @@
 
     def _check_consistency(self):
         for gi in range(self.connectivity_indices.shape[0]):
-            v = set([])
             ind = self.connectivity_indices[gi, :] - self._index_offset
             coords = self.connectivity_coords[ind, :]
             for i in range(3):


https://bitbucket.org/yt_analysis/yt-3.0/commits/1b017807cacd/
Changeset:   1b017807cacd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 14:39:12
Summary:     Oops!  Another import fix.
Affected #:  1 file

diff -r ff66eee240bafd01764245b6402094aafe56cdde -r 1b017807cacd3b62fface80beda9ab6241b6ebfd yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -17,7 +17,7 @@
 import numpy as np
 
 from yt.funcs import mylog
-from yt.utilities.definitions import x_dict, y_dict
+from yt.exceptions import YTParticleDepositionNotImplemented
 from yt.utilities.lib import \
     fill_fcoords, fill_fwidths
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/765af59d962b/
Changeset:   765af59d962b
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 15:13:18
Summary:     Typo.  Sorry, Fido!
Affected #:  1 file

diff -r 1b017807cacd3b62fface80beda9ab6241b6ebfd -r 765af59d962b7a1d12a4cc878cf8f52b6e3d2ce4 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -17,7 +17,8 @@
 import numpy as np
 
 from yt.funcs import mylog
-from yt.exceptions import YTParticleDepositionNotImplemented
+from yt.utilities.exceptions import \
+    YTParticleDepositionNotImplemented
 from yt.utilities.lib import \
     fill_fcoords, fill_fwidths
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/448cf19ead26/
Changeset:   448cf19ead26
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 15:54:10
Summary:     Fixing some imports.  Flake8!
Affected #:  1 file

diff -r 765af59d962b7a1d12a4cc878cf8f52b6e3d2ce4 -r 448cf19ead2605065e6c902d5cca6a427acd474c yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -14,27 +14,22 @@
 #-----------------------------------------------------------------------------
 
 import h5py
+import os
 import numpy as np
 import weakref
-from yt.funcs import *
+from yt.funcs import mylog
 from yt.data_objects.unstructured_mesh import \
            SemiStructuredMesh
 from yt.geometry.unstructured_mesh_handler import \
            UnstructuredGeometryHandler
-from yt.geometry.geometry_handler import GeometryHandler, YTDataChunk
 from yt.data_objects.static_output import \
            StaticOutput
-from yt.utilities.lib import \
-    get_box_grids_level
 from yt.utilities.io_handler import \
     io_registry
 from yt.utilities.definitions import \
     mpc_conversion, sec_conversion
 
 from .fields import MoabFieldInfo, KnownMoabFields
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-import pdb
 
 def _get_convert(fname):
     def _conv(data):


https://bitbucket.org/yt_analysis/yt-3.0/commits/c804dc9fd651/
Changeset:   c804dc9fd651
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 17:11:19
Summary:     Initial pass at PyNE in-memory meshing.
Affected #:  4 files

diff -r 448cf19ead2605065e6c902d5cca6a427acd474c -r c804dc9fd651ff56d9ad7a78c1435d86b6553af4 yt/frontends/moab/api.py
--- a/yt/frontends/moab/api.py
+++ b/yt/frontends/moab/api.py
@@ -16,7 +16,8 @@
 from .data_structures import \
       MoabHex8Mesh, \
       MoabHex8Hierarchy, \
-      MoabHex8StaticOutput
+      MoabHex8StaticOutput, \
+      PyneMoabHex8StaticOutput
 
 from .fields import \
       MoabFieldInfo, \

diff -r 448cf19ead2605065e6c902d5cca6a427acd474c -r c804dc9fd651ff56d9ad7a78c1435d86b6553af4 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -126,3 +126,89 @@
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 
+class PyneMeshHex8Hierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style='moab_hex8_pyne'):
+        self.parameter_file = weakref.proxy(pf)
+        self.data_style = data_style
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.getcwd()
+        self.pyne_mesh = pf.pyne_mesh
+
+        super(PyneMeshHex8Hierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        con = self.pyne_mesh.mesh.adjTable.astype("int64")
+        from itaps import iBase
+        ent = self.pyne_mesh.structured_set.getEntities(iBase.Type.vertex)
+        coords = self.pyne_mesh.mesh.getVtxCoords(ent).astype("float64")
+        self.meshes = [MoabHex8Mesh(0, self.hierarchy_filename, con,
+                                    coords, self)]
+
+    def _detect_fields(self):
+        self.field_list = self.pyne_mesh.mesh.getAllTags(
+            self.pyne_mesh.mesh.rootSet)
+
+    def _count_grids(self):
+        self.num_grids = 1
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class PyneMoabHex8StaticOutput(StaticOutput):
+    _hierarchy_class = PyneMeshHex8Hierarchy
+    _fieldinfo_fallback = MoabFieldInfo
+    _fieldinfo_known = KnownMoabFields
+    periodicity = (False, False, False)
+
+    def __init__(self, pyne_mesh, data_style='moab_hex8_pyne',
+                 storage_filename = None):
+        filename = "pyne_mesh_" + str(id(pyne_mesh))
+        self.pyne_mesh = pyne_mesh
+        StaticOutput.__init__(self, str(filename), data_style)
+        self.storage_filename = storage_filename
+        self.filename = filename
+
+    def _set_units(self):
+        """Generates the conversion to various physical _units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['cm'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in mpc_conversion.keys():
+            self.units[unit] = 1.0 * mpc_conversion[unit] / mpc_conversion["cm"]
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+    def _parse_parameter_file(self):
+        from itaps import iBase
+        ent = self.pyne_mesh.structured_set.getEntities(iBase.Type.vertex)
+        coords = self.pyne_mesh.mesh.getVtxCoords(ent)
+        self.domain_left_edge = coords[0]
+        self.domain_right_edge = coords[-1]
+        self.domain_dimensions = self.domain_right_edge - self.domain_left_edge
+        self.refine_by = 2
+        self.dimensionality = len(self.domain_dimensions)
+        self.current_time = 0.0
+        self.unique_identifier = self.parameter_filename
+        self.cosmological_simulation = False
+        self.num_ghost_zones = 0
+        self.current_redshift = self.omega_lambda = self.omega_matter \
+                              = self.hubble_constant \
+                              = self.cosmological_simulation = 0.0
+        self.parameters['Time'] = 1.0 # Hardcode time conversion for now.
+        self.parameters["HydroMethod"] = 0 # Hardcode for now until field staggering is supported.
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        return False
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+

diff -r 448cf19ead2605065e6c902d5cca6a427acd474c -r c804dc9fd651ff56d9ad7a78c1435d86b6553af4 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -49,3 +49,32 @@
                 for g in chunk.objs:
                     ind += g.select(selector, ds, rv[field], ind) # caches
         return rv
+
+class IOHandlerMoabPyneHex8(BaseIOHandler):
+    _data_style = "moab_hex8_pyne"
+
+    def __init__(self, pf, *args, **kwargs):
+        # TODO check if _num_per_stride is needed
+        BaseIOHandler.__init__(self, *args, **kwargs)
+        self.pf = pf
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        assert size
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        fhandle = self._handle
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)
+        ngrids = sum(len(chunk.objs) for chunk in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [fname for ftype, fname in fields], ngrids)
+        for field in fields:
+            ftype, fname = field
+            ds = np.array(fhandle[field_dname(fname)][:], dtype="float64")
+            ind = 0
+            for chunk in chunks:
+                for g in chunk.objs:
+                    ind += g.select(selector, ds, rv[field], ind) # caches
+        return rv

diff -r 448cf19ead2605065e6c902d5cca6a427acd474c -r c804dc9fd651ff56d9ad7a78c1435d86b6553af4 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -96,7 +96,8 @@
     GDFStaticOutput, GDFFieldInfo, add_gdf_field
 
 from yt.frontends.moab.api import \
-    MoabHex8StaticOutput, MoabFieldInfo, add_moab_field
+    MoabHex8StaticOutput, MoabFieldInfo, add_moab_field, \
+    PyneMoabHex8StaticOutput
 
 from yt.frontends.athena.api import \
     AthenaStaticOutput, AthenaFieldInfo, add_athena_field

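With this changeset an in-memory PyNE mesh can be handed directly to yt
without writing an .h5m file first. A hypothetical usage sketch, assuming
PyNE is installed and using its structured-Mesh constructor; everything
outside the diff (the Mesh arguments in particular) is an assumption:

    # hypothetical usage; pyne.mesh.Mesh arguments are assumptions
    from pyne.mesh import Mesh
    from yt.frontends.moab.api import PyneMoabHex8StaticOutput

    coords = [range(4), range(4), range(4)]
    m = Mesh(structured=True, structured_coords=coords)
    pf = PyneMoabHex8StaticOutput(m)  # wraps the live mesh, no file on disk
    dd = pf.h.all_data()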

https://bitbucket.org/yt_analysis/yt-3.0/commits/2addbb53b742/
Changeset:   2addbb53b742
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 19:00:57
Summary:     Create connectivity and coordinates for in-memory PyNE meshes.
Affected #:  1 file

diff -r c804dc9fd651ff56d9ad7a78c1435d86b6553af4 -r 2addbb53b7424131c9e954c06ad4b2e0f78bdbba yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -139,10 +139,14 @@
         super(PyneMeshHex8Hierarchy, self).__init__(pf, data_style)
 
     def _initialize_mesh(self):
-        con = self.pyne_mesh.mesh.adjTable.astype("int64")
-        from itaps import iBase
+        from itaps import iBase, iMesh
         ent = self.pyne_mesh.structured_set.getEntities(iBase.Type.vertex)
         coords = self.pyne_mesh.mesh.getVtxCoords(ent).astype("float64")
+        vind = self.pyne_mesh.structured_set.getAdjEntIndices(
+            iBase.Type.region, iMesh.Topology.hexahedron,
+            iBase.Type.vertex)[1].indices.data
+        vind.shape = (vind.size/8.0, 8)
+        con = vind.astype("int64")
         self.meshes = [MoabHex8Mesh(0, self.hierarchy_filename, con,
                                     coords, self)]
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/4d439b6c2702/
Changeset:   4d439b6c2702
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 20:27:28
Summary:     Finishing PyNE mesh creation in-memory.
Affected #:  1 file

diff -r 2addbb53b7424131c9e954c06ad4b2e0f78bdbba -r 4d439b6c2702768ff3541d80a63a8432da5063cb yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -126,6 +126,10 @@
     def __repr__(self):
         return self.basename.rsplit(".", 1)[0]
 
+class PyneHex8Mesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
 class PyneMeshHex8Hierarchy(UnstructuredGeometryHandler):
 
     def __init__(self, pf, data_style='moab_hex8_pyne'):
@@ -144,11 +148,11 @@
         coords = self.pyne_mesh.mesh.getVtxCoords(ent).astype("float64")
         vind = self.pyne_mesh.structured_set.getAdjEntIndices(
             iBase.Type.region, iMesh.Topology.hexahedron,
-            iBase.Type.vertex)[1].indices.data
-        vind.shape = (vind.size/8.0, 8)
-        con = vind.astype("int64")
-        self.meshes = [MoabHex8Mesh(0, self.hierarchy_filename, con,
-                                    coords, self)]
+            iBase.Type.vertex)[1].indices.data.astype("int64")
+        # Divide by float so it throws an error if it's not 8
+        vind.shape = (vind.shape[0] / 8.0, 8)
+        self.meshes = [PyneHex8Mesh(0, self.hierarchy_filename,
+                                    vind, coords, self)]
 
     def _detect_fields(self):
         self.field_list = self.pyne_mesh.mesh.getAllTags(

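The reshape divides by a float on purpose: NumPy of that era only accepted a
float shape when its value was integral, so a connectivity array whose
length was not a multiple of 8 raised immediately instead of silently
mis-shaping. Current NumPy rejects float shapes outright, so an explicit
check expresses the same guard:

    import numpy as np

    def hex8_connectivity(vind):
        # Guard: the flat index list must describe whole hexahedra.
        if vind.size % 8 != 0:
            raise ValueError("connectivity length %d is not a multiple of 8"
                             % vind.size)
        return vind.reshape(vind.size // 8, 8).astype("int64")

    print(hex8_connectivity(np.arange(16)).shape)  # -> (2, 8)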

https://bitbucket.org/yt_analysis/yt-3.0/commits/183be437b1c2/
Changeset:   183be437b1c2
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 20:58:46
Summary:     Finishing up IO for PyNE meshes.
Affected #:  2 files

diff -r 4d439b6c2702768ff3541d80a63a8432da5063cb -r 183be437b1c2851d0c7e15d46080b02ffc3d9610 yt/frontends/moab/data_structures.py
--- a/yt/frontends/moab/data_structures.py
+++ b/yt/frontends/moab/data_structures.py
@@ -155,8 +155,16 @@
                                     vind, coords, self)]
 
     def _detect_fields(self):
-        self.field_list = self.pyne_mesh.mesh.getAllTags(
-            self.pyne_mesh.mesh.rootSet)
+        # Currently, I don't know a better way to do this.  This code, for
+        # example, does not work:
+        #self.field_list = self.pyne_mesh.mesh.getAllTags(
+        #    self.pyne_mesh.mesh.rootSet)
+        # So we have to look at each entity.
+        tags = set([])
+        for ent in self.pyne_mesh.mesh.rootSet:
+            for tag in self.pyne_mesh.mesh.getAllTags(ent):
+                tags.add(tag.name)
+        self.field_list = list(tags)
 
     def _count_grids(self):
         self.num_grids = 1

diff -r 4d439b6c2702768ff3541d80a63a8432da5063cb -r 183be437b1c2851d0c7e15d46080b02ffc3d9610 yt/frontends/moab/io.py
--- a/yt/frontends/moab/io.py
+++ b/yt/frontends/moab/io.py
@@ -30,7 +30,6 @@
         self._handle = pf._handle
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
-        assert size
         chunks = list(chunks)
         assert(len(chunks) == 1)
         fhandle = self._handle
@@ -59,20 +58,24 @@
         self.pf = pf
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
-        assert size
         chunks = list(chunks)
         assert(len(chunks) == 1)
-        fhandle = self._handle
+        tags = {}
         rv = {}
+        mesh = self.pf.pyne_mesh.mesh
         for field in fields:
             ftype, fname = field
-            rv[field] = np.empty(size, dtype=fhandle[field_dname(fname)].dtype)
+            rv[field] = np.empty(size, dtype="float64")
+            tags[field] = mesh.getTagHandle(fname)
+        from itaps import iBase
+        ents = self.pf.pyne_mesh.structured_set.getEntities(
+            iBase.Type.region)
         ngrids = sum(len(chunk.objs) for chunk in chunks)
         mylog.debug("Reading %s cells of %s fields in %s blocks",
                     size, [fname for ftype, fname in fields], ngrids)
         for field in fields:
             ftype, fname = field
-            ds = np.array(fhandle[field_dname(fname)][:], dtype="float64")
+            ds = tags[field][ents]
             ind = 0
             for chunk in chunks:
                 for g in chunk.objs:


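The _detect_fields workaround collects tag names entity by entity because
querying the root set alone returned nothing; a set keeps the names unique.
The pattern in isolation, with namedtuples standing in for iTAPS tags:

    from collections import namedtuple

    Tag = namedtuple("Tag", "name")

    def detect_fields(entities, get_all_tags):
        # visit every entity and gather unique tag names
        tags = set()
        for ent in entities:
            for tag in get_all_tags(ent):
                tags.add(tag.name)
        return sorted(tags)

    tagmap = {"e1": [Tag("flux")], "e2": [Tag("flux"), Tag("density")]}
    print(detect_fields(["e1", "e2"], lambda e: tagmap[e]))
    # -> ['density', 'flux']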
https://bitbucket.org/yt_analysis/yt-3.0/commits/041fa6908898/
Changeset:   041fa6908898
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-18 22:28:26
Summary:     Adding an unsupported object listing.
Affected #:  3 files

diff -r 183be437b1c2851d0c7e15d46080b02ffc3d9610 -r 041fa69088987f233490ad884f8a53307ffee8f2 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -38,8 +38,14 @@
     ParallelAnalysisInterface, parallel_splitter
 from yt.utilities.exceptions import YTFieldNotFound
 
+def _unsupported_object(pf, obj_name):
+    def _raise_unsupp(*args, **kwargs):
+        raise YTObjectNotImplemented(pf, obj_name)
+    return _raise_unsupp
+
 class GeometryHandler(ParallelAnalysisInterface):
     _global_mesh = True
+    _unsupported_objects = ()
 
     def __init__(self, pf, data_style):
         ParallelAnalysisInterface.__init__(self)
@@ -107,6 +113,10 @@
         self.objects = []
         self.plots = []
         for name, cls in sorted(data_object_registry.items()):
+            if name in self._unsupported_objects:
+                setattr(self, name,
+                    _unsupported_object(self.parameter_file, name))
+                continue
             cname = cls.__name__
             if cname.endswith("Base"): cname = cname[:-4]
             self._add_object_class(name, cname, cls, dd)

diff -r 183be437b1c2851d0c7e15d46080b02ffc3d9610 -r 041fa69088987f233490ad884f8a53307ffee8f2 yt/geometry/unstructured_mesh_handler.py
--- a/yt/geometry/unstructured_mesh_handler.py
+++ b/yt/geometry/unstructured_mesh_handler.py
@@ -23,6 +23,7 @@
 
 class UnstructuredGeometryHandler(GeometryHandler):
     _global_mesh = False
+    _unsupported_objects = ('proj', 'covering_grid', 'smoothed_covering_grid')
 
     def __init__(self, pf, data_style):
         self.data_style = data_style

diff -r 183be437b1c2851d0c7e15d46080b02ffc3d9610 -r 041fa69088987f233490ad884f8a53307ffee8f2 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -288,3 +288,13 @@
         v += "Typically this happens when a log binning is specified "
         v += "and zero or negative values are given for the bounds."
         return v
+
+class YTObjectNotImplemented(YTException):
+    def __init__(self, pf, obj_name):
+        self.pf = pf
+        self.obj_name = obj_name
+
+    def __str__(self):
+        v  = r"The object type '%s' is not implemented for the parameter file "
+        v += r"'%s'."
+        return v % (self.obj_name, self.pf)

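The listing works by substitution at handler setup: any data-object name in
_unsupported_objects is replaced with a closure that raises
YTObjectNotImplemented on use, so users get a clear error rather than an
AttributeError or a silent failure. A self-contained sketch of the pattern:

    class YTObjectNotImplemented(Exception):
        pass

    def _unsupported_object(pf, obj_name):
        def _raise_unsupp(*args, **kwargs):
            raise YTObjectNotImplemented(
                "The object type '%s' is not implemented for '%s'."
                % (obj_name, pf))
        return _raise_unsupp

    class ToyHandler(object):
        _unsupported_objects = ("proj",)

        def __init__(self, pf):
            for name in ("proj", "sphere"):
                if name in self._unsupported_objects:
                    setattr(self, name, _unsupported_object(pf, name))

    h = ToyHandler("c5")
    # h.proj()  # would raise YTObjectNotImplemented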
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
