[yt-svn] commit/yt-3.0: 33 new changesets

commits-noreply at bitbucket.org commits-noreply at bitbucket.org
Wed Oct 9 13:01:11 PDT 2013


33 new commits in yt-3.0:

https://bitbucket.org/yt_analysis/yt-3.0/commits/b2cb6f33ba48/
Changeset:   b2cb6f33ba48
Branch:      yt
User:        MatthewTurk
Date:        2013-05-15 19:05:53
Summary:     Copying from Orion into new Boxlib frontend.  Will merge Castro, Maestro and Nyx.
Affected #:  8 files

diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/__init__.py
--- /dev/null
+++ b/yt/frontends/boxlib/__init__.py
@@ -0,0 +1,29 @@
+"""
+API for yt.frontends.boxlib
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""

diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/api.py
--- /dev/null
+++ b/yt/frontends/boxlib/api.py
@@ -0,0 +1,41 @@
+"""
+API for yt.frontends.boxlib
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: J.S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Author: Britton Smith <brittonsmith at gmail.com>
+Affiliation: MSU
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2010-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+
+from .data_structures import \
+      OrionGrid, \
+      OrionHierarchy, \
+      OrionStaticOutput
+
+from .fields import \
+      OrionFieldInfo, \
+      add_orion_field
+
+from .io import \
+      IOHandlerNative

diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/data_structures.py
--- /dev/null
+++ b/yt/frontends/boxlib/data_structures.py
@@ -0,0 +1,645 @@
+"""
+Data structures for Orion. 
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J. S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import re
+import weakref
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
+import numpy as np
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.data_objects.hierarchy import AMRHierarchy
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+
+from .definitions import \
+    orion2enzoDict, \
+    parameterDict, \
+    yt2orionFieldsDict, \
+    orion_FAB_header_pattern
+from .fields import \
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
+
+
+class OrionGrid(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
+                 dimensions, start, stop, paranoia=False, **kwargs):
+        AMRGridPatch.__init__(self, index, **kwargs)
+        self.filename = filename
+        self._offset = offset
+        self._paranoid = paranoia
+        
+        # should error check this
+        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
+        self.start_index = start.copy()#.transpose()
+        self.stop_index = stop.copy()#.transpose()
+        self.LeftEdge  = LeftEdge.copy()
+        self.RightEdge = RightEdge.copy()
+        self.index = index
+        self.Level = level
+
+    def get_global_startindex(self):
+        return self.start_index
+
+    def _prepare_grid(self):
+        """
+        Copies all the appropriate attributes from the hierarchy
+        """
+        # This is definitely the slowest part of generating the hierarchy
+        # Now we give it pointers to all of its attributes
+        # Note that to keep in line with Enzo, we have broken PEP-8
+        h = self.hierarchy # cache it
+        #self.StartIndices = h.gridStartIndices[self.id]
+        #self.EndIndices = h.gridEndIndices[self.id]
+        h.grid_levels[self.id,0] = self.Level
+        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
+        h.grid_right_edge[self.id,:] = self.RightEdge[:]
+        #self.Time = h.gridTimes[self.id,0]
+        self.NumberOfParticles = 0 # these will be read in later
+        self.field_indexes = h.field_indexes
+        self.Children = h.gridTree[self.id]
+        pIDs = h.gridReverseTree[self.id]
+        if len(pIDs) > 0:
+            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
+        else:
+            self.Parent = None
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        dx = self.hierarchy.grid_dxs[self.index][0]
+        dy = self.hierarchy.grid_dys[self.index][0]
+        dz = self.hierarchy.grid_dzs[self.index][0]
+        self.dds = np.array([dx, dy, dz])
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "OrionGrid_%04i" % (self.id)
+
+class OrionHierarchy(AMRHierarchy):
+    grid = OrionGrid
+    def __init__(self, pf, data_style='orion_native'):
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        header_filename = os.path.join(pf.fullplotdir,'Header')
+        self.directory = pf.fullpath
+        self.data_style = data_style
+
+        self.readGlobalHeader(header_filename,self.parameter_file.paranoid_read) # also sets up the grid objects
+        self.__cache_endianness(self.levels[-1].grids[-1])
+        AMRHierarchy.__init__(self,pf, self.data_style)
+        self._populate_hierarchy()
+        self._read_particles()
+
+    def _read_particles(self):
+        """
+        Reads in particles and assigns them to grids. Searches for
+        star particles, then sink particles; if neither file exists,
+        no particles are read in. To add a new Orion particle type,
+        simply add its filename to the list below.
+
+        """
+        self.grid_particle_count = np.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.fullplotdir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip())  # first line is the particle count
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+        return True
+                
+    def readGlobalHeader(self,filename,paranoid_read):
+        """
+        read the global header file for an Orion plotfile output.
+        """
+        counter = 0
+        header_file = open(filename,'r')
+        self.__global_header_lines = header_file.readlines()
+
+        # parse the file
+        self.orion_version = self.__global_header_lines[0].rstrip()
+        self.n_fields      = int(self.__global_header_lines[1])
+
+        counter = self.n_fields+2
+        self.field_list = []
+        for i,line in enumerate(self.__global_header_lines[2:counter]):
+            self.field_list.append(line.rstrip())
+
+        # this is unused...eliminate it?
+        #for f in self.field_indexes:
+        #    self.field_list.append(orion2ytFieldsDict.get(f,f))
+
+        self.dimension = int(self.__global_header_lines[counter])
+        if self.dimension != 3:
+            raise RunTimeError("Orion must be in 3D to use yt.")
+        counter += 1
+        self.Time = float(self.__global_header_lines[counter])
+        counter += 1
+        self.finest_grid_level = int(self.__global_header_lines[counter])
+        self.n_levels = self.finest_grid_level + 1
+        counter += 1
+        # quantities with _unnecessary are also stored in the inputs
+        # file and are not needed.  they are read in and stored in
+        # case in the future we want to enable a "backwards" way of
+        # taking the data out of the Header file and using it to fill
+        # in in the case of a missing inputs file
+        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
+        #domain_re.search(self.__global_header_lines[counter]).groups()
+        counter += 1
+        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+        counter += 1
+        self.dx = np.zeros((self.n_levels,3))
+        for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+            self.dx[i] = np.array(map(float,line.split()))
+        counter += self.n_levels
+        self.geometry = int(self.__global_header_lines[counter])
+        if self.geometry != 0:
+            raise RunTimeError("yt only supports cartesian coordinates.")
+        counter += 1
+
+        # this is just to debug. eventually it should go away.
+        linebreak = int(self.__global_header_lines[counter])
+        if linebreak != 0:
+            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+        counter += 1
+
+        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        self.levels = []
+        grid_counter = 0
+        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
+        re_file_finder = re.compile(file_finder_pattern)
+        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
+        re_dim_finder = re.compile(dim_finder_pattern)
+        data_files_pattern = r"Level_[\d]/"
+        data_files_finder = re.compile(data_files_pattern)
+
+        for level in range(0,self.n_levels):
+            tmp = self.__global_header_lines[counter].split()
+            # should this be grid_time or level_time??
+            lev,ngrids,grid_time = int(tmp[0]),int(tmp[1]),float(tmp[2])
+            counter += 1
+            nsteps = int(self.__global_header_lines[counter])
+            counter += 1
+            self.levels.append(OrionLevel(lev,ngrids))
+            # open level header, extract file names and offsets for
+            # each grid
+            # read slightly out of order here: at the end of the lo,hi
+            # pairs for x,y,z is a *list* of files types in the Level
+            # directory. each type has Header and a number of data
+            # files (one per processor)
+            tmp_offset = counter + 3*ngrids
+            nfiles = 0
+            key_off = 0
+            files =   {} # dict(map(lambda a: (a,[]),self.field_list))
+            offsets = {} # dict(map(lambda a: (a,[]),self.field_list))
+            while nfiles+tmp_offset < len(self.__global_header_lines) and data_files_finder.match(self.__global_header_lines[nfiles+tmp_offset]):
+                filen = os.path.join(self.parameter_file.fullplotdir, \
+                                     self.__global_header_lines[nfiles+tmp_offset].strip())
+                # open each "_H" header file, and get the number of
+                # components within it
+                level_header_file = open(filen+'_H','r').read()
+                start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
+                grid_file_offset = re_file_finder.findall(level_header_file)
+                ncomp_this_file = int(level_header_file.split('\n')[2])
+                for i in range(ncomp_this_file):
+                    key = self.field_list[i+key_off]
+                    f,o = zip(*grid_file_offset)
+                    files[key] = f
+                    offsets[key] = o
+                    self.field_indexes[key] = i
+                key_off += ncomp_this_file
+                nfiles += 1
+            # convert dict of lists to list of dicts
+            fn = []
+            off = []
+            lead_path = os.path.join(self.parameter_file.fullplotdir,'Level_%i'%level)
+            for i in range(ngrids):
+                fi = [os.path.join(lead_path,files[key][i]) for key in self.field_list]
+                of = [int(offsets[key][i]) for key in self.field_list]
+                fn.append(dict(zip(self.field_list,fi)))
+                off.append(dict(zip(self.field_list,of)))
+
+            for grid in range(0,ngrids):
+                gfn = fn[grid]  # filename of file containing this grid
+                gfo = off[grid] # offset within that file
+                xlo,xhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                ylo,yhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                zlo,zhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                lo = np.array([xlo,ylo,zlo])
+                hi = np.array([xhi,yhi,zhi])
+                dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
+                self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
+                grid_counter += 1 # this is global, and shouldn't be reset
+                                  # for each level
+
+            # already read the filenames above...
+            counter+=nfiles
+            self.num_grids = grid_counter
+            self.float_type = 'float64'
+
+        self.maxLevel = self.n_levels - 1 
+        self.max_level = self.n_levels - 1
+        header_file.close()
+
+    def __cache_endianness(self,test_grid):
+        """
+        Cache the endianness and bytes per real of the grids by using a
+        test grid and assuming that all grids have the same
+        endianness. This is a pretty safe assumption since Orion uses
+        one file per processor, and if you're running on a cluster
+        with different endian processors, then you're on your own!
+        """
+        # open the test file & grab the header
+        inFile = open(os.path.expanduser(test_grid.filename[self.field_list[0]]),'rb')
+        header = inFile.readline()
+        inFile.close()
+        header.strip()
+        
+        # parse it. the pattern is in definitions.py
+        headerRe = re.compile(orion_FAB_header_pattern)
+        bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
+        self._bytesPerReal = int(bytesPerReal)
+        if self._bytesPerReal == int(endian[0]):
+            dtype = '<'
+        elif self._bytesPerReal == int(endian[-1]):
+            dtype = '>'
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        dtype += ('f%i' % self._bytesPerReal) # always a floating point
+        self._dtype = dtype
+
+    def __calculate_grid_dimensions(self,start_stop):
+        start = np.array(map(int,start_stop[0].split(',')))
+        stop = np.array(map(int,start_stop[1].split(',')))
+        dimension = stop - start + 1
+        return dimension,start,stop
+        
+    def _populate_grid_objects(self):
+        mylog.debug("Creating grid objects")
+        self.grids = np.concatenate([level.grids for level in self.levels])
+        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
+        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
+        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
+        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
+        left_edges = []
+        right_edges = []
+        dims = []
+        for level in self.levels:
+            left_edges += [g.LeftEdge for g in level.grids]
+            right_edges += [g.RightEdge for g in level.grids]
+            dims += [g.ActiveDimensions for g in level.grids]
+        self.grid_left_edge = np.array(left_edges)
+        self.grid_right_edge = np.array(right_edges)
+        self.grid_dimensions = np.array(dims)
+        self.gridReverseTree = [ [] for i in range(self.num_grids)]
+        self.gridTree = [ [] for i in range(self.num_grids)]
+        mylog.debug("Done creating grid objects")
+
+    def _populate_hierarchy(self):
+        self.__setup_grid_tree()
+        #self._setup_grid_corners()
+        for i, grid in enumerate(self.grids):
+            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            grid._prepare_grid()
+            grid._setup_dx()
+
+    def __setup_grid_tree(self):
+        for i, grid in enumerate(self.grids):
+            children = self._get_grid_children(grid)
+            for child in children:
+                self.gridReverseTree[child.id].append(i)
+                self.gridTree[i].append(weakref.proxy(child))
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        dd["field_indexes"] = self.field_indexes
+        AMRHierarchy._setup_classes(self, dd)
+        #self._add_object_class('grid', "OrionGrid", OrionGridBase, dd)
+        self.object_types.sort()
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        return self.grids[mask]
+
+    def _count_grids(self):
+        """this is already provided in 
+
+        """
+        pass
+
+    def _initialize_grid_arrays(self):
+        mylog.debug("Allocating arrays for %s grids", self.num_grids)
+        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
+        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
+        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
+        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
+        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
+
+    def _parse_hierarchy(self):
+        pass
+    
+    def _detect_fields(self):
+        pass
+
+    def _setup_derived_fields(self):
+        pass
+
+    def _initialize_state_variables(self):
+        """override to not re-initialize num_grids in AMRHierarchy.__init__
+
+        """
+        self._parallel_locking = False
+        self._data_file = None
+        self._data_mode = None
+        self._max_locations = {}
+    
+class OrionLevel:
+    def __init__(self,level,ngrids):
+        self.level = level
+        self.ngrids = ngrids
+        self.grids = []
+    
+
+class OrionStaticOutput(StaticOutput):
+    """
+    This class is a stripped down class that simply reads and parses
+    *filename*, without looking at the Orion hierarchy.
+    """
+    _hierarchy_class = OrionHierarchy
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
+
+    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
+                 data_style='orion_native', paranoia=False,
+                 storage_filename = None):
+        """
+        The parameter file is usually called "inputs", and there may be
+        a Fortran inputs file, usually called "probin". Here, plotname
+        will be a directory name; as per BoxLib, data_style will be
+        Native (implemented here), IEEE (not yet implemented), or ASCII
+        (not yet implemented).
+        """
+        self.storage_filename = storage_filename
+        self.paranoid_read = paranoia
+        self.parameter_filename = paramFilename
+        self.fparameter_filename = fparamFilename
+        self.__ipfn = paramFilename
+
+        self.fparameters = {}
+
+        StaticOutput.__init__(self, plotname.rstrip("/"),
+                              data_style='orion_native')
+
+        # These should maybe not be hardcoded?
+        self.parameters["HydroMethod"] = 'orion' # always PPM DE
+        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["DualEnergyFormalism"] = 0 # always off.
+        self.parameters["EOSType"] = -1 # default
+
+        if self.fparameters.has_key("mu"):
+            self.parameters["mu"] = self.fparameters["mu"]
+
+    def _localize(self, f, default):
+        if f is None:
+            return os.path.join(self.directory, default)
+        return f
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        pname = args[0].rstrip("/")
+        dn = os.path.dirname(pname)
+        if len(args) > 1: kwargs['paramFilename'] = args[1]
+        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
+
+        # We check for the job_info file's existence because this is currently
+        # what distinguishes Orion data from MAESTRO data.
+        pfn = os.path.join(pfname)
+        if not os.path.exists(pfn): return False
+        castro = any(("castro." in line for line in open(pfn)))
+        nyx = any(("nyx." in line for line in open(pfn)))
+        maestro = os.path.exists(os.path.join(pname, "job_info"))
+        really_orion = any(("geometry.prob_lo" in line for line in open(pfn)))
+        orion = (not castro) and (not maestro) and (not nyx) and really_orion
+        return orion
+        
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+        self.fullplotdir = os.path.abspath(self.parameter_filename)
+        self._parse_header_file()
+        self.parameter_filename = self._localize(
+                self.__ipfn, 'inputs')
+        self.fparameter_filename = self._localize(
+                self.fparameter_filename, 'probin')
+        if os.path.isfile(self.fparameter_filename):
+            self._parse_fparameter_file()
+            for param in self.fparameters:
+                if orion2enzoDict.has_key(param):
+                    self.parameters[orion2enzoDict[param]]=self.fparameters[param]
+        # Let's read the file
+        self.unique_identifier = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        lines = open(self.parameter_filename).readlines()
+        for lineI, line in enumerate(lines):
+            if line.find("#") >= 1: # Keep the commented lines...
+                line=line[:line.find("#")]
+            line=line.strip().rstrip()
+            if len(line) < 2 or line.find("#") == 0: # ...but skip comments
+                continue
+            try:
+                param, vals = map(strip,map(rstrip,line.split("=")))
+            except ValueError:
+                mylog.error("ValueError: '%s'", line)
+                continue
+            if orion2enzoDict.has_key(param):
+                paramName = orion2enzoDict[param]
+                t = map(parameterDict[paramName], vals.split())
+                if len(t) == 1:
+                    self.parameters[paramName] = t[0]
+                else:
+                    if paramName == "RefineBy":
+                        self.parameters[paramName] = t[0]
+                    else:
+                        self.parameters[paramName] = t
+                
+            elif param.startswith("geometry.prob_hi"):
+                self.domain_right_edge = \
+                    np.array([float(i) for i in vals.split()])
+            elif param.startswith("geometry.prob_lo"):
+                self.domain_left_edge = \
+                    np.array([float(i) for i in vals.split()])
+            elif param.startswith("Prob.lo_bc"):
+                self.periodicity = ensure_tuple([int(i) == 0 for i in vals.split()])
+
+        self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
+        self.dimensionality = self.parameters["TopGridRank"]
+        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
+        self.refine_by = self.parameters["RefineBy"]
+
+        if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
+            self.cosmological_simulation = 1
+            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
+            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
+            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
+            a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
+            line = a_file.readline().strip()
+            a_file.close()
+            self.parameters["CosmologyCurrentRedshift"] = 1/float(line) - 1
+            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def _parse_fparameter_file(self):
+        """
+        Parses the fortran parameter file for Orion. Most of this will
+        be useless, but this is where it keeps mu = mass per
+        particle/m_hydrogen.
+        """
+        lines = open(self.fparameter_filename).readlines()
+        for line in lines:
+            if line.count("=") == 1:
+                param, vals = map(strip,map(rstrip,line.split("=")))
+                if vals.count("'") == 0:
+                    t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                else:
+                    t = vals.split()
+                if len(t) == 1:
+                    self.fparameters[param] = t[0]
+                else:
+                    self.fparameters[param] = t
+
+    def _parse_header_file(self):
+        """
+        Parses the BoxLib header file to get any parameters stored
+        there. Hierarchy information is read out of this file in
+        OrionHierarchy. 
+
+        Currently, only Time is read here.
+        """
+        header_file = open(os.path.join(self.fullplotdir,'Header'))
+        lines = header_file.readlines()
+        header_file.close()
+        n_fields = int(lines[1])
+        self.current_time = float(lines[3+n_fields])
+
+
+                
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+

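The _read_particle_file routine above assigns each particle to the finest grid containing it, via an np.choose masking trick borrowed from object_finding_mixin.py. A minimal sketch of that containment test, using made-up grid edges rather than a real dataset:

    import numpy as np

    # Two hypothetical grids; only the second contains the particle.
    grid_left_edge = np.array([[0.0, 0.0, 0.0], [0.5, 0.5, 0.5]])
    grid_right_edge = np.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    coord = [0.75, 0.75, 0.75]

    mask = np.ones(len(grid_left_edge))
    for i in range(len(coord)):
        # Zero out grids whose left edge lies above, or whose right
        # edge lies below, the particle position along this axis.
        np.choose(np.greater(grid_left_edge[:, i], coord[i]), (mask, 0), mask)
        np.choose(np.greater(grid_right_edge[:, i], coord[i]), (0, mask), mask)

    print np.where(mask == 1)[0]  # -> [1]

Of the grids that survive the mask, the finest (highest Level) wins, since Orion particles always live on the finest level covering a point.
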
diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/definitions.py
--- /dev/null
+++ b/yt/frontends/boxlib/definitions.py
@@ -0,0 +1,77 @@
+"""
+Various definitions for various other modules and routines
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J.S. Oishi.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+from yt.funcs import *
+
+# TODO: get rid of enzo parameters we do not need
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                }
+
+
+# converts the Orion inputs file name to the Enzo/yt name expected
+# throughout the code. key is Orion name, value is Enzo/yt equivalent
+orion2enzoDict = {"amr.n_cell": "TopGridDimensions",
+                  "materials.gamma": "Gamma",
+                  "amr.ref_ratio": "RefineBy",
+                  "castro.use_comoving": "ComovingCoordinates",
+                  "castro.redshift_in": "CosmologyInitialRedshift",
+                  "comoving_OmL": "CosmologyOmegaLambdaNow",
+                  "comoving_OmM": "CosmologyOmegaMatterNow",
+                  "comoving_h": "CosmologyHubbleConstantNow"
+                  }
+
+yt2orionFieldsDict = {}
+orion2ytFieldsDict = {}
+
+orion_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"

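The orion_FAB_header_pattern above is what __cache_endianness and the paranoid read path use to pick apart the first line of each FAB on disk. A small sketch against a hand-written sample header (the digit strings here are invented, not a real IEEE format descriptor) shows the six captured groups and how the endianness field becomes a NumPy dtype:

    import re

    # Pattern copied from definitions.py above.
    FAB_header = (r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)"
                  r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\)"
                  r" (\d+)\n")

    sample = ("FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (8 7 6 5 4 3 2 1)))"
              "((0,0,0) (63,63,63) (0,0,0)) 1\n")

    groups = re.search(FAB_header, sample).groups()
    bytesPerReal, endian, start, stop, centerType, nComponents = groups
    # Same convention as __cache_endianness in data_structures.py.
    bytesPerReal = int(bytesPerReal)
    if bytesPerReal == int(endian[0]):
        dtype = '<'          # little endian
    elif bytesPerReal == int(endian[-1]):
        dtype = '>'          # big endian
    dtype += 'f%i' % bytesPerReal
    print dtype, start, stop, nComponents  # -> <f8 0,0,0 63,63,63 1
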
diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/fields.py
--- /dev/null
+++ b/yt/frontends/boxlib/fields.py
@@ -0,0 +1,191 @@
+"""
+Orion-specific fields
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: UC Berkeley
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2008-2011 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+
+from yt.utilities.physical_constants import \
+    mh, kboltz
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+
+
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
+
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
+
+add_orion_field("density", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_orion_field("eden", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
+
+add_orion_field("xmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+add_orion_field("ymom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
+
+add_orion_field("zmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+translation_dict = {"x-velocity": "xvel",
+                    "y-velocity": "yvel",
+                    "z-velocity": "zvel",
+                    "Density": "density",
+                    "TotalEnergy": "eden",
+                    "Temperature": "temperature",
+                    "x-momentum": "xmom",
+                    "y-momentum": "ymom",
+                    "z-momentum": "zmom"
+                   }
+
+for f,v in translation_dict.items():
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)])
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units = ff._units, display_name=f)
+
+def _xVelocity(field, data):
+    """generate x-velocity from x-momentum and density
+    
+    """
+    return data["xmom"]/data["density"]
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _yVelocity(field,data):
+    """generate y-velocity from y-momentum and density
+
+    """
+    #try:
+    #    return data["xvel"]
+    #except KeyError:
+    return data["ymom"]/data["density"]
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _zVelocity(field,data):
+    """generate z-velocity from z-momentum and density
+    
+    """
+    return data["zmom"]/data["density"]
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _ThermalEnergy(field, data):
+    """generate thermal (gas energy). Dual Energy Formalism was
+        implemented by Stella, but this isn't how it's called, so I'll
+        leave that commented out for now.
+    """
+    #if data.pf["DualEnergyFormalism"]:
+    #    return data["GasEnergy"]
+    #else:
+    return data["TotalEnergy"] - 0.5 * data["density"] * (
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
+add_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
+
+def _Pressure(field,data):
+    """M{(Gamma-1.0)*e, where e is thermal energy density
+       NB: this will need to be modified for radiation
+    """
+    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+
+def _Temperature(field,data):
+    return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
+add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+
+# particle fields
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+
+    return _Particles
+
+_particle_field_list = ["mass", 
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

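The derived fields above form a chain: velocities are momentum over density, ThermalEnergy subtracts the bulk kinetic term from TotalEnergy, and Pressure and Temperature then follow from the ideal-gas Gamma. A single-cell sanity check of that arithmetic, with made-up cgs values (the constants are approximate; yt's exact values live in yt.utilities.physical_constants):

    Gamma, mu = 5.0 / 3.0, 0.6
    mh, kboltz = 1.67e-24, 1.38e-16    # approximate cgs values

    density = 1.0e-20                  # g / cm^3
    xmom = ymom = zmom = 1.0e-15       # g / (cm^2 s)
    eden = 2.0e-10                     # erg / cm^3 ("TotalEnergy")

    xvel = xmom / density              # x-velocity: 1.0e5 cm/s
    kinetic = 0.5 * density * 3 * xvel ** 2
    thermal = eden - kinetic           # ThermalEnergy: 5.0e-11 erg/cm^3
    pressure = (Gamma - 1.0) * thermal
    temperature = (Gamma - 1.0) * mu * mh * thermal / (kboltz * density)
    print pressure, temperature        # ~3.3e-11 dyne/cm^2, ~24 K
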
diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/io.py
--- /dev/null
+++ b/yt/frontends/boxlib/io.py
@@ -0,0 +1,160 @@
+"""
+Orion data-file handling functions
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: KIPAC/SLAC/Stanford
+Homepage: http://yt-project.org/
+License:
+  Copyright (C) 2007-2011 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import os
+import re
+
+import numpy as np
+
+from yt.funcs import mylog
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+from definitions import \
+    yt2orionFieldsDict, \
+    orion_FAB_header_pattern
+
+class IOHandlerNative(BaseIOHandler):
+
+    _data_style = "orion_native"
+
+    def modify(self, field):
+        return field.swapaxes(0,2)
+
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)
+
+    def _read_data(self,grid,field):
+        """
+        reads packed multiFABs output by BoxLib in "NATIVE" format.
+
+        """
+
+        filen = os.path.expanduser(grid.filename[field])
+        off = grid._offset[field]
+        inFile = open(filen,'rb')
+        inFile.seek(off)
+        header = inFile.readline()
+        header.strip()
+
+        if grid._paranoid:
+            mylog.warn("Orion Native reader: Paranoid read mode.")
+            headerRe = re.compile(orion_FAB_header_pattern)
+            bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
+
+            # we will build up a dtype string, starting with endian
+            # check endianness (this code is ugly. fix?)
+            bytesPerReal = int(bytesPerReal)
+            if bytesPerReal == int(endian[0]):
+                dtype = '<'
+            elif bytesPerReal == int(endian[-1]):
+                dtype = '>'
+            else:
+                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+            dtype += ('f%i'% bytesPerReal) #always a floating point
+
+            # determine size of FAB
+            start = np.array(map(int,start.split(',')))
+            stop = np.array(map(int,stop.split(',')))
+
+            gridSize = stop - start + 1
+
+            error_count = 0
+            if (start != grid.start).any():
+                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
+                error_count += 1
+            if (stop != grid.stop).any():
+                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
+                error_count += 1
+            if (gridSize != grid.ActiveDimensions).any():
+                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
+                error_count += 1
+            if bytesPerReal != grid.hierarchy._bytesPerReal:
+                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
+                error_count += 1
+            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
+                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
+                error_count += 1
+
+            if error_count > 0:
+                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
+
+        else:
+            start = grid.start_index
+            stop = grid.stop_index
+            dtype = grid.hierarchy._dtype
+            bytesPerReal = grid.hierarchy._bytesPerReal
+
+        nElements = grid.ActiveDimensions.prod()
+
+        # one field has nElements*bytesPerReal bytes and is located
+        # nElements*bytesPerReal*field_index from the offset location
+        if yt2orionFieldsDict.has_key(field):
+            fieldname = yt2orionFieldsDict[field]
+        else:
+            fieldname = field
+        field_index = grid.field_indexes[fieldname]
+        inFile.seek(int(nElements*bytesPerReal*field_index),1)
+        field = np.fromfile(inFile,count=nElements,dtype=dtype)
+        field = field.reshape(grid.ActiveDimensions, order='F')
+
+        # we can/should also check against the max and min in the header file
+
+        inFile.close()
+        return field
+

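_read_data above relies on the components of one FAB being stored back to back, so a given field begins nElements*bytesPerReal*field_index bytes past the end of the header line. A standalone sketch of that seek-and-read against a synthetic file (a placeholder header plus two 4^3 float64 components, not a real plotfile):

    import numpy as np

    dims = np.array([4, 4, 4])
    nElements = dims.prod()
    bytesPerReal, field_index = 8, 1
    data = np.arange(2 * nElements, dtype='<f8')

    with open('fake_fab.bin', 'wb') as f:
        f.write("FAB header placeholder\n")
        f.write(data.tostring())

    inFile = open('fake_fab.bin', 'rb')
    inFile.readline()                                            # skip the header
    inFile.seek(int(nElements * bytesPerReal * field_index), 1)  # relative seek
    field = np.fromfile(inFile, count=nElements, dtype='<f8')
    inFile.close()
    field = field.reshape(dims, order='F')     # FAB data is Fortran-ordered
    print field[0, 0, 0]                       # -> 64.0, first value of component 1
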
diff -r a03fc16b5457aa3deba49a11277fbb41ffebd09e -r b2cb6f33ba486efad0f9a62f2c523900c899a65e yt/frontends/boxlib/setup.py
--- /dev/null
+++ b/yt/frontends/boxlib/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('boxlib', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config


https://bitbucket.org/yt_analysis/yt-3.0/commits/030017de7f82/
Changeset:   030017de7f82
Branch:      yt
User:        MatthewTurk
Date:        2013-05-15 19:28:19
Summary:     Splitting Boxlib / Orion readers
Affected #:  1 file

diff -r b2cb6f33ba486efad0f9a62f2c523900c899a65e -r 030017de7f82cda599a2a16b00fadc9f67c34fb9 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -32,52 +32,16 @@
 from definitions import \
     yt2orionFieldsDict, \
     orion_FAB_header_pattern
 
-class IOHandlerNative(BaseIOHandler):
+class IOHandlerBoxlib(BaseIOHandler):
 
-    _data_style = "orion_native"
+    _data_style = "boxlib_base"
 
     def modify(self, field):
         return field.swapaxes(0,2)
 
-    def _read_particles(self, grid, field): 
-        """
-        parses the Orion Star Particle text files
-        
-        """
-        index = {'particle_mass': 0,
-                 'particle_position_x': 1,
-                 'particle_position_y': 2,
-                 'particle_position_z': 3,
-                 'particle_momentum_x': 4,
-                 'particle_momentum_y': 5,
-                 'particle_momentum_z': 6,
-                 'particle_angmomen_x': 7,
-                 'particle_angmomen_y': 8,
-                 'particle_angmomen_z': 9,
-                 'particle_mlast': 10,
-                 'particle_mdeut': 11,
-                 'particle_n': 12,
-                 'particle_mdot': 13,
-                 'particle_burnstate': 14,
-                 'particle_id': 15}
-
-        def read(line, field):
-            return float(line.split(' ')[index[field]])
-
-        fn = grid.pf.fullplotdir + "/StarParticles"
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-            particles = []
-            for line in lines[1:]:
-                if grid.NumberOfParticles > 0:
-                    coord = read(line, "particle_position_x"), \
-                            read(line, "particle_position_y"), \
-                            read(line, "particle_position_z") 
-                    if ( (grid.LeftEdge < coord).all() and 
-                         (coord <= grid.RightEdge).all() ):
-                        particles.append(read(line, field))
-        return np.array(particles)
-
     def _read_data(self,grid,field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
@@ -158,3 +122,56 @@
         inFile.close()
         return field
 
+class IOHandlerOrion(IOHandlerBoxlib):
+    _data_style = "orion_native"
+
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)
+
+class IOHandlerCastro(IOHandlerBoxlib):
+    _data_style = "castro_native"
+
+    def _read_particle_field(self, grid, field):
+        filen = os.path.expanduser(grid.particle_filename)
+        off = grid._particle_offset
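+        # NOTE: this relies on read_castro_particles and
+        # castro_particle_field_names coming from the Castro frontend;
+        # they are not imported in this file yet.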
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+            castro_particle_field_names.index(field),
+            len(castro_particle_field_names),
+            tr)
+        return tr
+

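With this split, IOHandlerBoxlib owns the NATIVE binary layout while the subclasses override only what actually differs: Orion's text-file star particles and Castro's binary particle records, each advertising its own _data_style. A toy sketch of the data_style-keyed dispatch this enables (illustrating the pattern only, not yt's actual registration machinery):

    class BaseIOHandler(object):
        _data_style = None

    class IOHandlerBoxlib(BaseIOHandler):
        _data_style = "boxlib_base"

    class IOHandlerOrion(IOHandlerBoxlib):
        _data_style = "orion_native"

    # Map each style string to its handler class.
    io_registry = dict((c._data_style, c)
                       for c in (IOHandlerBoxlib, IOHandlerOrion))

    handler = io_registry["orion_native"]()
    print type(handler).__name__  # -> IOHandlerOrion
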

https://bitbucket.org/yt_analysis/yt-3.0/commits/0a9cf8e7251c/
Changeset:   0a9cf8e7251c
Branch:      yt
User:        MatthewTurk
Date:        2013-05-15 21:15:06
Summary:     Rename a few things and strip out redundancies.

Orion can now be read in with this.
Affected #:  3 files

diff -r 030017de7f82cda599a2a16b00fadc9f67c34fb9 -r 0a9cf8e7251ceeaa233dce48a9c120a0b1066e7a yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -1,5 +1,5 @@
 """
-Data structures for Orion. 
+Data structures for Boxlib Codes 
 
 Author: J. S. Oishi <jsoishi at gmail.com>
 Affiliation: KIPAC/SLAC/Stanford
@@ -46,15 +46,14 @@
 from .definitions import \
     orion2enzoDict, \
     parameterDict, \
-    yt2orionFieldsDict, \
-    orion_FAB_header_pattern
+    FAB_header_pattern
 from .fields import \
     OrionFieldInfo, \
     add_orion_field, \
     KnownOrionFields
 
 
-class OrionGrid(AMRGridPatch):
+class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
     def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
                  dimensions, start, stop, paranoia=False, **kwargs):
@@ -107,11 +106,11 @@
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
-        return "OrionGrid_%04i" % (self.id)
+        return "BoxlibGrid_%04i" % (self.id)
 
-class OrionHierarchy(AMRHierarchy):
-    grid = OrionGrid
-    def __init__(self, pf, data_style='orion_native'):
+class BoxlibHierarchy(AMRHierarchy):
+    grid = BoxlibGrid
+    def __init__(self, pf, data_style='boxlib_native'):
         self.field_indexes = {}
         self.parameter_file = weakref.proxy(pf)
         header_filename = os.path.join(pf.fullplotdir,'Header')
@@ -122,57 +121,11 @@
         self.__cache_endianness(self.levels[-1].grids[-1])
         AMRHierarchy.__init__(self,pf, self.data_style)
         self._populate_hierarchy()
-        self._read_particles()
+        #self._read_particles()
 
-    def _read_particles(self):
-        """
-        reads in particles and assigns them to grids. Will search for
-        Star particles, then sink particles if no star particle file
-        is found, and finally will simply note that no particles are
-        found if neither works. To add a new Orion particle type,
-        simply add it to the if/elif/else block.
-
-        """
-        self.grid_particle_count = np.zeros(len(self.grids))
-
-        for particle_filename in ["StarParticles", "SinkParticles"]:
-            fn = os.path.join(self.pf.fullplotdir, particle_filename)
-            if os.path.exists(fn): self._read_particle_file(fn)
-
-    def _read_particle_file(self, fn):
-        """actually reads the orion particle data file itself.
-
-        """
-        if not os.path.exists(fn): return
-        with open(fn, 'r') as f:
-            lines = f.readlines()
-            self.num_stars = int(lines[0].strip())  # first line is the particle count
-            for line in lines[1:]:
-                particle_position_x = float(line.split(' ')[1])
-                particle_position_y = float(line.split(' ')[2])
-                particle_position_z = float(line.split(' ')[3])
-                coord = [particle_position_x, particle_position_y, particle_position_z]
-                # for each particle, determine which grids contain it
-                # copied from object_finding_mixin.py
-                mask=np.ones(self.num_grids)
-                for i in xrange(len(coord)):
-                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
-                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
-                ind = np.where(mask == 1)
-                selected_grids = self.grids[ind]
-                # in orion, particles always live on the finest level.
-                # so, we want to assign the particle to the finest of
-                # the grids we just found
-                if len(selected_grids) != 0:
-                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
-                    ind = np.where(self.grids == grid)[0][0]
-                    self.grid_particle_count[ind] += 1
-                    self.grids[ind].NumberOfParticles += 1
-        return True
-                
     def readGlobalHeader(self,filename,paranoid_read):
         """
-        read the global header file for an Orion plotfile output.
+        read the global header file for a Boxlib plotfile output.
         """
         counter = 0
         header_file = open(filename,'r')
@@ -248,7 +201,7 @@
             counter += 1
             nsteps = int(self.__global_header_lines[counter])
             counter += 1
-            self.levels.append(OrionLevel(lev,ngrids))
+            self.levels.append(BoxlibLevel(lev,ngrids))
             # open level header, extract file names and offsets for
             # each grid
             # read slightly out of order here: at the end of the lo,hi
@@ -316,7 +269,7 @@
         """
         Cache the endianness and bytes per real of the grids by using a
         test grid and assuming that all grids have the same
-        endianness. This is a pretty safe assumption since Orion uses
+        endianness. This is a pretty safe assumption since Boxlib uses
         one file per processor, and if you're running on a cluster
         with different endian processors, then you're on your own!
         """
@@ -326,8 +279,7 @@
         inFile.close()
         header.strip()
         
-        # parse it. the patter is in OrionDefs.py
-        headerRe = re.compile(orion_FAB_header_pattern)
+        headerRe = re.compile(FAB_header_pattern)
         bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
         self._bytesPerReal = int(bytesPerReal)
         if self._bytesPerReal == int(endian[0]):
@@ -389,7 +341,6 @@
         dd = self._get_data_reader_dict()
         dd["field_indexes"] = self.field_indexes
         AMRHierarchy._setup_classes(self, dd)
-        #self._add_object_class('grid', "OrionGrid", OrionGridBase, dd)
         self.object_types.sort()
 
     def _get_grid_children(self, grid):
@@ -431,19 +382,19 @@
         self._data_mode = None
         self._max_locations = {}
     
-class OrionLevel:
+class BoxlibLevel:
     def __init__(self,level,ngrids):
         self.level = level
         self.ngrids = ngrids
         self.grids = []
     
 
-class OrionStaticOutput(StaticOutput):
+class BoxlibStaticOutput(StaticOutput):
     """
     This class is a stripped down class that simply reads and parses
-    *filename*, without looking at the Orion hierarchy.
+    *filename*, without looking at the Boxlib hierarchy.
     """
-    _hierarchy_class = OrionHierarchy
+    _hierarchy_class = BoxlibHierarchy
     _fieldinfo_fallback = OrionFieldInfo
     _fieldinfo_known = KnownOrionFields
 
@@ -621,8 +572,6 @@
         self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
         for unit in sec_conversion.keys():
             self.time_units[unit] = 1.0 / sec_conversion[unit]
-        for key in yt2orionFieldsDict:
-            self.conversion_factors[key] = 1.0
 
     def _setup_nounits_units(self):
         z = 0
@@ -643,3 +592,50 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+class OrionHierarchy(BoxlibHierarchy):
+    def _read_particles(self):
+        """
+        reads in particles and assigns them to grids. Will search for
+        Star particles, then sink particles if no star particle file
+        is found, and finally will simply note that no particles are
+        found if neither works. To add a new Orion particle type,
+        simply add it to the if/elif/else block.
+
+        """
+        self.grid_particle_count = np.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.fullplotdir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip())
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+        return True
+                

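The np.choose dance in _read_particle_file above is compact but opaque; a
rough pure-NumPy equivalent of the containment-and-finest-level logic is
sketched below.  It assumes grid_left_edge and grid_right_edge are
(num_grids, 3) arrays and grid_levels is a flat integer array, and it uses
half-open intervals, so edge behavior may differ slightly from the strict
np.greater comparisons in the original.

    import numpy as np

    def finest_containing_grid(coord, grid_left_edge, grid_right_edge,
                               grid_levels):
        # A particle lies in a grid if left <= coord < right on every axis.
        coord = np.asarray(coord)
        inside = np.all((grid_left_edge <= coord) &
                        (coord < grid_right_edge), axis=1)
        candidates = np.where(inside)[0]
        if candidates.size == 0:
            return None
        # Orion particles always live on the finest level, so pick the
        # deepest of the grids that contain the point.
        return candidates[np.argmax(grid_levels[candidates])]
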
diff -r 030017de7f82cda599a2a16b00fadc9f67c34fb9 -r 0a9cf8e7251ceeaa233dce48a9c120a0b1066e7a yt/frontends/boxlib/definitions.py
--- a/yt/frontends/boxlib/definitions.py
+++ b/yt/frontends/boxlib/definitions.py
@@ -71,7 +71,4 @@
                   "comoving_h": "CosmologyHubbleConstantNow"
                   }
 
-yt2orionFieldsDict = {}
-orion2ytFieldsDict = {}
-
-orion_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"
+FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"

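For reference, FAB_header_pattern matches lines of the following shape.  The
sample below is made up, modeled on the 2D headers quoted in a later
changeset in this series but padded to the 3D tuples this pattern expects:

    import re

    FAB_header_pattern = (
        r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)"
        r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
    sample = ("FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))"
              "((0,0,0) (63,63,63) (0,0,0)) 27\n")
    bpr, endian, start, stop, centering, ncomp = \
        re.match(FAB_header_pattern, sample).groups()
    print bpr, start, stop, ncomp   # 8 0,0,0 63,63,63 27
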
diff -r 030017de7f82cda599a2a16b00fadc9f67c34fb9 -r 0a9cf8e7251ceeaa233dce48a9c120a0b1066e7a yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -42,17 +42,6 @@
     def modify(self, field):
         return field.swapaxes(0,2)
 
-    def _read_particle_field(self, grid, field):
-        offset = grid._particle_offset
-        filen = os.path.expanduser(grid.particle_filename)
-        off = grid._particle_offset
-        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
-        read_castro_particles(filen, off,
-            castro_particle_field_names.index(field),
-            len(castro_particle_field_names),
-            tr)
-        return tr
-
     def _read_data(self, grid, field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.


https://bitbucket.org/yt_analysis/yt-3.0/commits/eab92b6091b5/
Changeset:   eab92b6091b5
Branch:      yt
User:        MatthewTurk
Date:        2013-05-15 23:36:20
Summary:     Cleaning up the fparameters parsing.
Affected #:  1 file

diff -r 0a9cf8e7251ceeaa233dce48a9c120a0b1066e7a -r eab92b6091b50e2155ef74b60e88317d6041ad43 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -528,18 +528,23 @@
         be useless, but this is where it keeps mu = mass per
         particle/m_hydrogen.
         """
-        lines = open(self.fparameter_filename).readlines()
-        for line in lines:
-            if line.count("=") == 1:
-                param, vals = map(strip,map(rstrip,line.split("=")))
-                if vals.count("'") == 0:
-                    t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
-                else:
-                    t = vals.split()
-                if len(t) == 1:
-                    self.fparameters[param] = t[0]
-                else:
-                    self.fparameters[param] = t
+        regexp = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+        for line in (l for l in open(self.fparameter_filename) if "=" in l):
+            param, vals = [v.strip() for v in line.split("=")]
+            # Now, there are a couple different types of parameters.
+            # Some will be where you only have floating point values, others
+            # will be where things are specified as string literals.
+            # Unfortunately, we're also using Fortran values, which will have
+            # things like 1.d-2 which is pathologically difficult to parse if
+            # your C library doesn't include 'd' in its locale for strtod.
+            # So we'll try to determine this.
+            vals = vals.split()
+            if any(regexp.match(v) for v in vals):
+                vals = [float(v.replace("D","e").replace("d","e"))
+                        for v in vals]
+            if len(vals) == 1:
+                vals = vals[0]
+            self.fparameters[param] = vals
 
     def _parse_header_file(self):
         """

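The practical win here is that Fortran-style literals such as 1.d-2 get
normalized before float() ever sees them.  The same approach as a standalone
function:

    import re

    _scinot = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")

    def parse_fparameter_value(vals):
        # If the tokens look numeric, swap the Fortran 'd'/'D' exponent
        # marker for 'e' so Python's float() accepts it.
        tokens = vals.split()
        if any(_scinot.match(t) for t in tokens):
            tokens = [float(t.replace("D", "e").replace("d", "e"))
                      for t in tokens]
        return tokens[0] if len(tokens) == 1 else tokens

    print parse_fparameter_value("1.d-2")        # 0.01
    print parse_fparameter_value("0.5 2.0D+3")   # [0.5, 2000.0]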

https://bitbucket.org/yt_analysis/yt-3.0/commits/db3431380b9c/
Changeset:   db3431380b9c
Branch:      yt
User:        MatthewTurk
Date:        2013-05-16 23:01:54
Summary:     Major rewrite of Boxlib infrastructure.  Overall cleanup.

Passes limited tests, but projections operate properly so far for both Orion
and Maestro.
Affected #:  3 files

diff -r eab92b6091b50e2155ef74b60e88317d6041ad43 -r db3431380b9c83d64b81f97fc6dc72799c6d0779 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -67,6 +67,7 @@
         self.pf = self.hierarchy.parameter_file  # weakref already
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
+        self.filename = filename
 
     def get_global_startindex(self):
         """

diff -r eab92b6091b50e2155ef74b60e88317d6041ad43 -r db3431380b9c83d64b81f97fc6dc72799c6d0779 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -30,6 +30,7 @@
 from collections import defaultdict
 from string import strip, rstrip
 from stat import ST_CTIME
+import glob
 
 import numpy as np
 
@@ -42,6 +43,8 @@
     mpc_conversion, sec_conversion
 from yt.utilities.parallel_tools.parallel_analysis_interface import \
     parallel_root_only
+from yt.utilities.lib import \
+    get_box_grids_level
 
 from .definitions import \
     orion2enzoDict, \
@@ -51,221 +54,142 @@
     OrionFieldInfo, \
     add_orion_field, \
     KnownOrionFields
+from .io import IOHandlerBoxlib
 
+_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+_file_finder = re.compile(r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n")
+_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+_data_files_finder = re.compile(r"Level_[\d]/")
 
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
-                 dimensions, start, stop, paranoia=False, **kwargs):
-        AMRGridPatch.__init__(self, index, **kwargs)
-        self.filename = filename
+
+    def __init__(self, grid_id, offset, filename = None,
+                 hierarchy = None):
+        super(BoxlibGrid, self).__init__(grid_id, filename, hierarchy)
         self._offset = offset
-        self._paranoid = paranoia
-        
-        # should error check this
-        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
-        self.LeftEdge  = LeftEdge.copy()
-        self.RightEdge = RightEdge.copy()
-        self.index = index
-        self.Level = level
+        self._parent_id = []
+        self._children_ids = []
+
+    def _prepare_grid(self):
+        super(BoxlibGrid, self)._prepare_grid()
+        my_ind = self.id - self._id_offset
+        self.start_index = self.hierarchy.grid_start_index[my_ind]
 
     def get_global_startindex(self):
         return self.start_index
 
-    def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
-        # This is definitely the slowest part of generating the hierarchy
-        # Now we give it pointers to all of its attributes
-        # Note that to keep in line with Enzo, we have broken PEP-8
-        h = self.hierarchy # cache it
-        #self.StartIndices = h.gridStartIndices[self.id]
-        #self.EndIndices = h.gridEndIndices[self.id]
-        h.grid_levels[self.id,0] = self.Level
-        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
-        h.grid_right_edge[self.id,:] = self.RightEdge[:]
-        #self.Time = h.gridTimes[self.id,0]
-        self.NumberOfParticles = 0 # these will be read in later
-        self.field_indexes = h.field_indexes
-        self.Children = h.gridTree[self.id]
-        pIDs = h.gridReverseTree[self.id]
-        if len(pIDs) > 0:
-            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
-        else:
-            self.Parent = None
-
     def _setup_dx(self):
         # has already been read in and stored in hierarchy
-        dx = self.hierarchy.grid_dxs[self.index][0]
-        dy = self.hierarchy.grid_dys[self.index][0]
-        dz = self.hierarchy.grid_dzs[self.index][0]
-        self.dds = np.array([dx, dy, dz])
+        my_ind = self.id - self._id_offset
+        self.dds = self.hierarchy.level_dds[self.Level,:]
         self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
 
     def __repr__(self):
         return "BoxlibGrid_%04i" % (self.id)
 
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.hierarchy.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.hierarchy.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
 class BoxlibHierarchy(AMRHierarchy):
     grid = BoxlibGrid
     def __init__(self, pf, data_style='boxlib_native'):
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        header_filename = os.path.join(pf.fullplotdir,'Header')
-        self.directory = pf.fullpath
         self.data_style = data_style
+        self.header_filename = os.path.join(pf.fullplotdir, 'Header')
 
-        self.readGlobalHeader(header_filename,self.parameter_file.paranoid_read) # also sets up the grid objects
-        self.__cache_endianness(self.levels[-1].grids[-1])
-        AMRHierarchy.__init__(self,pf, self.data_style)
-        self._populate_hierarchy()
+        AMRHierarchy.__init__(self, pf, data_style)
+        self._cache_endianness(self.grids[-1])
         #self._read_particles()
 
-    def readGlobalHeader(self,filename,paranoid_read):
+    def _parse_hierarchy(self):
         """
         read the global header file for a Boxlib plotfile output.
         """
-        counter = 0
-        header_file = open(filename,'r')
-        self.__global_header_lines = header_file.readlines()
+        self.max_level = self.parameter_file._max_level
+        header_file = open(self.header_filename,'r')
+        # We can now skip to the point in the file we want to start parsing at.
+        header_file.seek(self.parameter_file._header_mesh_start)
 
-        # parse the file
-        self.orion_version = self.__global_header_lines[0].rstrip()
-        self.n_fields      = int(self.__global_header_lines[1])
-
-        counter = self.n_fields+2
-        self.field_list = []
-        for i,line in enumerate(self.__global_header_lines[2:counter]):
-            self.field_list.append(line.rstrip())
-
-        # this is unused...eliminate it?
-        #for f in self.field_indexes:
-        #    self.field_list.append(orion2ytFieldsDict.get(f,f))
-
-        self.dimension = int(self.__global_header_lines[counter])
-        if self.dimension != 3:
-            raise RunTimeError("Orion must be in 3D to use yt.")
-        counter += 1
-        self.Time = float(self.__global_header_lines[counter])
-        counter += 1
-        self.finest_grid_level = int(self.__global_header_lines[counter])
-        self.n_levels = self.finest_grid_level + 1
-        counter += 1
-        # quantities with _unnecessary are also stored in the inputs
-        # file and are not needed.  they are read in and stored in
-        # case in the future we want to enable a "backwards" way of
-        # taking the data out of the Header file and using it to fill
-        # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
-        counter += 1
-        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
-        counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split() #np.array(map(int,self.__global_header_lines[counter].split()))
-        counter += 1
-        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
-        #domain_re.search(self.__global_header_lines[counter]).groups()
-        counter += 1
-        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
-        counter += 1
-        self.dx = np.zeros((self.n_levels,3))
-        for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = np.array(map(float,line.split()))
-        counter += self.n_levels
-        self.geometry = int(self.__global_header_lines[counter])
-        if self.geometry != 0:
+        dx = []
+        for i in range(self.max_level + 1):
+            dx.append([float(v) for v in header_file.next().split()])
+        self.level_dds = np.array(dx, dtype="float64")
+        if int(header_file.next()) != 0:
             raise RunTimeError("yt only supports cartesian coordinates.")
-        counter += 1
-
-        # this is just to debug. eventually it should go away.
-        linebreak = int(self.__global_header_lines[counter])
-        if linebreak != 0:
+        if int(header_file.next()) != 0:
             raise RunTimeError("INTERNAL ERROR! This should be a zero.")
-        counter += 1
 
         # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
-        self.levels = []
+        self.grids = []
         grid_counter = 0
-        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
-        re_file_finder = re.compile(file_finder_pattern)
-        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
-        re_dim_finder = re.compile(dim_finder_pattern)
-        data_files_pattern = r"Level_[\d]/"
-        data_files_finder = re.compile(data_files_pattern)
-
-        for level in range(0,self.n_levels):
-            tmp = self.__global_header_lines[counter].split()
+        for level in range(self.max_level + 1):
+            vals = header_file.next().split()
             # should this be grid_time or level_time??
-            lev,ngrids,grid_time = int(tmp[0]),int(tmp[1]),float(tmp[2])
-            counter += 1
-            nsteps = int(self.__global_header_lines[counter])
-            counter += 1
-            self.levels.append(BoxlibLevel(lev,ngrids))
-            # open level header, extract file names and offsets for
-            # each grid
-            # read slightly out of order here: at the end of the lo,hi
-            # pairs for x,y,z is a *list* of files types in the Level
-            # directory. each type has Header and a number of data
-            # files (one per processor)
-            tmp_offset = counter + 3*ngrids
-            nfiles = 0
-            key_off = 0
-            files =   {} # dict(map(lambda a: (a,[]),self.field_list))
-            offsets = {} # dict(map(lambda a: (a,[]),self.field_list))
-            while nfiles+tmp_offset < len(self.__global_header_lines) and data_files_finder.match(self.__global_header_lines[nfiles+tmp_offset]):
-                filen = os.path.join(self.parameter_file.fullplotdir, \
-                                     self.__global_header_lines[nfiles+tmp_offset].strip())
-                # open each "_H" header file, and get the number of
+            lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+            assert(lev == level)
+            nsteps = int(header_file.next())
+            for gi in range(ngrids):
+                xlo, xhi = [float(v) for v in header_file.next().split()]
+                ylo, yhi = [float(v) for v in header_file.next().split()]
+                zlo, zhi = [float(v) for v in header_file.next().split()]
+                self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
+                self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
+            # Now we get to the level header filename, which we open and parse.
+            fn = os.path.join(self.parameter_file.fullplotdir,
+                              header_file.next().strip())
+            level_header_file = open(fn + "_H")
+            level_dir = os.path.dirname(fn)
+            # We skip the first two lines, although they probably contain
+            # useful information I don't know how to decipher.
+            level_header_file.next()
+            level_header_file.next()
+            # Now we get the number of data files
+            ncomp_this_file = int(level_header_file.next())
+            # Skip the next line, and we should get the number of grids / FABs
+            # in this file
+            level_header_file.next()
+            # To decipher this next line, we expect something like:
+            # (8 0
+            # where the first is the number of FABs in this level.
+            ngrids = int(level_header_file.next().split()[0][1:])
+            # Now we can iterate over each and get the indices.
+            for gi in range(ngrids):
                 # components within it
-                level_header_file = open(filen+'_H','r').read()
-                start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
-                grid_file_offset = re_file_finder.findall(level_header_file)
-                ncomp_this_file = int(level_header_file.split('\n')[2])
-                for i in range(ncomp_this_file):
-                    key = self.field_list[i+key_off]
-                    f,o = zip(*grid_file_offset)
-                    files[key] = f
-                    offsets[key] = o
-                    self.field_indexes[key] = i
-                key_off += ncomp_this_file
-                nfiles += 1
-            # convert dict of lists to list of dicts
-            fn = []
-            off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,'Level_%i'%level)
-            for i in range(ngrids):
-                fi = [os.path.join(lead_path,files[key][i]) for key in self.field_list]
-                of = [int(offsets[key][i]) for key in self.field_list]
-                fn.append(dict(zip(self.field_list,fi)))
-                off.append(dict(zip(self.field_list,of)))
-
-            for grid in range(0,ngrids):
-                gfn = fn[grid]  # filename of file containing this grid
-                gfo = off[grid] # offset within that file
-                xlo,xhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                ylo,yhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                zlo,zhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                lo = np.array([xlo,ylo,zlo])
-                hi = np.array([xhi,yhi,zhi])
-                dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
-                grid_counter += 1 # this is global, and shouldn't be reset
-                                  # for each level
-
+                start, stop = _dim_finder.match(level_header_file.next()).groups()
+                start = np.array(start.split(","), dtype="int64")
+                stop = np.array(stop.split(","), dtype="int64")
+                dims = stop - start + 1
+                self.grid_dimensions[grid_counter + gi,:] = dims
+                self.grid_start_index[grid_counter + gi,:] = start
+            # Now we read two more lines.  The first of these is a close
+            # parenthesis.
+            level_header_file.next()
+            # This line I'm not 100% sure of, but it's either number of grids
+            # or number of FABfiles.
+            level_header_file.next()
+            # Now we iterate over grids to find their offsets in each file.
+            for gi in range(ngrids):
+                # Now we get the data file, at which point we're ready to
+                # create the grid.
+                dummy, filename, offset = level_header_file.next().split()
+                filename = os.path.join(level_dir, filename)
+                go = self.grid(grid_counter + gi, int(offset), filename, self)
+                go.Level = self.grid_levels[grid_counter + gi,:] = level
+                self.grids.append(go)
+            grid_counter += ngrids
             # already read the filenames above...
-            counter+=nfiles
-            self.num_grids = grid_counter
             self.float_type = 'float64'
 
-        self.maxLevel = self.n_levels - 1 
-        self.max_level = self.n_levels - 1
-        header_file.close()
-
-    def __cache_endianness(self,test_grid):
+    def _cache_endianness(self,test_grid):
         """
         Cache the endianness and bytes per real of the grids by using a
         test grid and assuming that all grids have the same
@@ -274,7 +198,7 @@
         with different endian processors, then you're on your own!
         """
         # open the test file & grab the header
-        inFile = open(os.path.expanduser(test_grid.filename[self.field_list[0]]),'rb')
+        inFile = open(os.path.expanduser(test_grid.filename),'rb')
         header = inFile.readline()
         inFile.close()
         header.strip()
@@ -292,7 +216,7 @@
         dtype += ('f%i' % self._bytesPerReal) # always a floating point
         self._dtype = dtype
 
-    def __calculate_grid_dimensions(self,start_stop):
+    def __calculate_grid_dimensions(self, start_stop):
         start = np.array(map(int,start_stop[0].split(',')))
         stop = np.array(map(int,start_stop[1].split(',')))
         dimension = stop - start + 1
@@ -300,78 +224,57 @@
         
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
-        self.grids = np.concatenate([level.grids for level in self.levels])
-        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = np.array(self.grid_levels.reshape((self.num_grids,1)),dtype='int32')
-        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
-        left_edges = []
-        right_edges = []
-        dims = []
-        for level in self.levels:
-            left_edges += [g.LeftEdge for g in level.grids]
-            right_edges += [g.RightEdge for g in level.grids]
-            dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = np.array(left_edges)
-        self.grid_right_edge = np.array(right_edges)
-        self.grid_dimensions = np.array(dims)
-        self.gridReverseTree = [] * self.num_grids
-        self.gridReverseTree = [ [] for i in range(self.num_grids)]
-        self.gridTree = [ [] for i in range(self.num_grids)]
-        mylog.debug("Done creating grid objects")
-
-    def _populate_hierarchy(self):
-        self.__setup_grid_tree()
-        #self._setup_grid_corners()
+        self.grids = np.array(self.grids, dtype='object')
+        self._reconstruct_parent_child()
         for i, grid in enumerate(self.grids):
             if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
             grid._prepare_grid()
             grid._setup_dx()
+        mylog.debug("Done creating grid objects")
 
-    def __setup_grid_tree(self):
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
         for i, grid in enumerate(self.grids):
-            children = self._get_grid_children(grid)
-            for child in children:
-                self.gridReverseTree[child.id].append(i)
-                self.gridTree[i].append(weakref.proxy(child))
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        dd["field_indexes"] = self.field_indexes
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
 
     def _get_grid_children(self, grid):
         mask = np.zeros(self.num_grids, dtype='bool')
         grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
         mask[grid_ind] = True
         mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return self.grids[mask]
+        return np.where(mask)
 
     def _count_grids(self):
-        """this is already provided in 
-
-        """
-        pass
-
+        # We can get everything from the Header file, but note that we're
+        # duplicating some work done elsewhere.  In a future where we don't
+        # pre-allocate grid arrays, this becomes unnecessary.
+        header_file = open(self.header_filename, 'r')
+        header_file.seek(self.parameter_file._header_mesh_start)
+        # Skip over the level dxs, geometry and the zero:
+        [header_file.next() for i in range(self.parameter_file._max_level + 3)]
+        # Now we need to be very careful, as we've seeked, and now we iterate.
+        # Does this work?  We are going to count the number of places that we
+        # have a three-item line.  The three items would be level, number of
+        # grids, and then grid time.
+        self.num_grids = 0
+        for line in header_file:
+            if len(line.split()) != 3: continue
+            self.num_grids += int(line.split()[1])
+        
     def _initialize_grid_arrays(self):
-        mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
-
-    def _parse_hierarchy(self):
-        pass
-    
-    def _detect_fields(self):
-        pass
-
-    def _setup_derived_fields(self):
-        pass
+        super(BoxlibHierarchy, self)._initialize_grid_arrays()
+        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
 
     def _initialize_state_variables(self):
         """override to not re-initialize num_grids in AMRHierarchy.__init__
@@ -381,7 +284,17 @@
         self._data_file = None
         self._data_mode = None
         self._max_locations = {}
-    
+
+    def _detect_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = self.parameter_file._field_list[:]
+        self.field_indexes = dict((f, i)
+                                for i, f in enumerate(self.field_list))
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+
 class BoxlibLevel:
     def __init__(self,level,ngrids):
         self.level = level
@@ -399,7 +312,7 @@
     _fieldinfo_known = KnownOrionFields
 
     def __init__(self, plotname, paramFilename=None, fparamFilename=None,
-                 data_style='orion_native', paranoia=False,
+                 data_style='boxlib_native', paranoia=False,
                  storage_filename = None):
         """
         The paramfile is usually called "inputs"
@@ -417,7 +330,7 @@
         self.fparameters = {}
 
         StaticOutput.__init__(self, plotname.rstrip("/"),
-                              data_style='orion_native')
+                              data_style='boxlib_native')
 
         # These should maybe not be hardcoded?
         self.parameters["HydroMethod"] = 'orion' # always PPM DE
@@ -464,71 +377,59 @@
         self.fparameter_filename = self._localize(
                 self.fparameter_filename, 'probin')
         if os.path.isfile(self.fparameter_filename):
-            self._parse_fparameter_file()
-            for param in self.fparameters:
-                if orion2enzoDict.has_key(param):
-                    self.parameters[orion2enzoDict[param]]=self.fparameters[param]
+            self._parse_probin()
         # Let's read the file
         self.unique_identifier = \
             int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.parameter_filename).readlines()
-        for lineI, line in enumerate(lines):
-            if line.find("#") >= 1: # Keep the commented lines...
-                line=line[:line.find("#")]
-            line=line.strip().rstrip()
-            if len(line) < 2 or line.find("#") == 0: # ...but skip comments
-                continue
-            try:
-                param, vals = map(strip,map(rstrip,line.split("=")))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-                continue
-            if orion2enzoDict.has_key(param):
-                paramName = orion2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    self.parameters[paramName] = t[0]
+        for line in (line.split("#")[0].strip() for line in
+                     open(self.parameter_filename)):
+            if len(line) == 0: continue
+            param, vals = [s.strip() for s in line.split("=")]
+            if param == "amr.n_cell":
+                vals = self.domain_dimensions = np.array(vals.split(), dtype='int32')
+            elif param == "amr.ref_ratio":
+                vals = self.refine_by = int(vals[0])
+            elif param == "Prob.lo_bc":
+                vals = self.periodicity = ensure_tuple([int(i) == 0 for i in vals.split()])
+            elif param == "castro.use_comoving":
+                vals = self.cosmological_simulation = bool(vals)
+            else:
+                # Now we guess some things about the parameter and its type
+                v = vals.split()[0] # Just in case there are multiple; we'll go
+                                    # back afterward to using vals.
+                try:
+                    float(v.upper().replace("D","E"))
+                except:
+                    pcast = str
                 else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
+                    syms = (".", "D+", "D-", "E+", "E-")
+                    if any(sym in v for sym in syms for v in vals.split()):
+                        pcast = float
                     else:
-                        self.parameters[paramName] = t
-                
-            elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = \
-                    np.array([float(i) for i in vals.split()])
-            elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = \
-                    np.array([float(i) for i in vals.split()])
-            elif param.startswith("Prob.lo_bc"):
-                self.periodicity = ensure_tuple([i == 0 for i in vals])
-
-        self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
-        self.dimensionality = self.parameters["TopGridRank"]
-        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"],dtype='int32')
-        self.refine_by = self.parameters["RefineBy"]
+                        pcast = int
+                vals = [pcast(v) for v in vals.split()]
+                if len(vals) == 1: vals = vals[0]
+            self.parameters[param] = vals
 
         if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
             self.cosmological_simulation = 1
-            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
-            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
-            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
+            self.omega_lambda = self.parameters["comoving_OmL"]
+            self.omega_matter = self.parameters["comoving_OmM"]
+            self.hubble_constant = self.parameters["comoving_h"]
             a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
             line = a_file.readline().strip()
             a_file.close()
-            self.parameters["CosmologyCurrentRedshift"] = 1/float(line) - 1
-            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+            self.current_redshift = 1/float(line) - 1
         else:
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
-    def _parse_fparameter_file(self):
+    def _parse_probin(self):
         """
         Parses the fortran parameter file for Orion. Most of this will
         be useless, but this is where it keeps mu = mass per
         particle/m_hydrogen.
         """
-        regexp = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
         for line in (l for l in open(self.fparameter_filename) if "=" in l):
             param, vals = [v.strip() for v in line.split("=")]
             # Now, there are a couple different types of parameters.
@@ -539,29 +440,49 @@
             # your C library doesn't include 'd' in its locale for strtod.
             # So we'll try to determine this.
             vals = vals.split()
-            if any(regexp.match(v) for v in vals):
+            if any(_scinot_finder.match(v) for v in vals):
                 vals = [float(v.replace("D","e").replace("d","e"))
                         for v in vals]
             if len(vals) == 1:
                 vals = vals[0]
-            self.fparameters[param] = vals
+            self.parameters[param] = vals
 
     def _parse_header_file(self):
         """
-        Parses the BoxLib header file to get any parameters stored
-        there. Hierarchy information is read out of this file in
-        OrionHierarchy. 
+        We parse the Boxlib header, which we use as our basis.  Anything in the
+        inputs file will override this, but the inputs file is not strictly
+        necessary for orientation of the data in space.
+        """
 
-        Currently, only Time is read here.
-        """
+        # Note: Python uses a read-ahead buffer, so using .next(), which would
+        # be my preferred solution, won't work here.  We have to explicitly
+        # call readline() if we want to end up with an offset at the very end.
+        # Fortunately, elsewhere we don't care about the offset, so we're fine
+        # everywhere else using iteration exclusively.
         header_file = open(os.path.join(self.fullplotdir,'Header'))
-        lines = header_file.readlines()
-        header_file.close()
-        n_fields = int(lines[1])
-        self.current_time = float(lines[3+n_fields])
+        self.orion_version = header_file.readline().rstrip()
+        n_fields = int(header_file.readline())
 
+        self._field_list = [header_file.readline().strip()
+                           for i in range(n_fields)]
 
-                
+        self.dimensionality = int(header_file.readline())
+        if self.dimensionality != 3:
+            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+        self.current_time = float(header_file.readline())
+        # This is traditionally a hierarchy attribute, so we will set it, but
+        # in a slightly hidden variable.
+        self._max_level = int(header_file.readline()) 
+        self.domain_left_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        self.domain_right_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        self.refine_by = int(header_file.readline())
+        # We now skip over global index space and timesteps per level
+        for i in range(2):
+            header_file.readline()
+        self._header_mesh_start = header_file.tell()
+
     def _set_units(self):
         """
         Generates the conversion to various physical _units based on the parameter file

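The parent/child reconstruction above leans on the compiled helper
get_box_grids_level; below is a rough pure-NumPy rendering of the same
two-pass idea (children are overlapping boxes one level finer, parents are
the inverted child links).  This is an illustration of the algorithm, not
the actual Cython routine.

    import numpy as np

    def reconstruct_parent_child(left, right, levels):
        # left, right: (num_grids, 3) edge arrays; levels: (num_grids,).
        children = []
        for i in range(len(levels)):
            # First pass: boxes that overlap grid i and sit one level finer.
            overlap = np.all((right > left[i]) & (left < right[i]), axis=1)
            children.append(np.where(overlap & (levels == levels[i] + 1))[0])
        # Second pass: invert the child lists into parent lists.
        parents = [[] for _ in levels]
        for i, kids in enumerate(children):
            for c in kids:
                parents[c].append(i)
        return children, parents
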
diff -r eab92b6091b50e2155ef74b60e88317d6041ad43 -r db3431380b9c83d64b81f97fc6dc72799c6d0779 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -26,15 +26,13 @@
 
 import os
 import numpy as np
+from yt.utilities.lib import read_castro_particles, read_and_seek
 from yt.utilities.io_handler import \
            BaseIOHandler
 
-from definitions import \
-    yt2orionFieldsDict
-
 class IOHandlerBoxlib(BaseIOHandler):
 
-    _data_style = "boxlib_base"
+    _data_style = "boxlib_native"
 
     def modify(self, field):
         return field.swapaxes(0,2)
@@ -42,84 +40,27 @@
         def read(line, field):
             return float(line.split(' ')[index[field]])
 
-    def _read_data(self,grid,field):
+    def _read_data(self, grid, field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
 
         """
 
-        filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen,'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
+        filen = os.path.expanduser(grid.filename)
+        offset1 = grid._offset
+        # one field has nElements * bytesPerReal bytes and is located
+        # nElements * bytesPerReal * field_index from the offset location
+        bytesPerReal = grid.hierarchy._bytesPerReal
 
-        if grid._paranoid:
-            mylog.warn("Orion Native reader: Paranoid read mode.")
-            headerRe = re.compile(orion_FAB_header_pattern)
-            bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
+        field_index = grid.hierarchy.field_indexes[field]
+        nElements = grid.ActiveDimensions.prod()
+        offset2 = int(nElements*bytesPerReal*field_index)
 
-            # we will build up a dtype string, starting with endian
-            # check endianness (this code is ugly. fix?)
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i'% bytesPerReal) #always a floating point
-
-            # determine size of FAB
-            start = np.array(map(int,start.split(',')))
-            stop = np.array(map(int,stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-
-        else:
-            start = grid.start_index
-            stop = grid.stop_index
-            dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
-
-        nElements = grid.ActiveDimensions.prod()
-
-        # one field has nElements*bytesPerReal bytes and is located
-        # nElements*bytesPerReal*field_index from the offset location
-        if yt2orionFieldsDict.has_key(field):
-            fieldname = yt2orionFieldsDict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = np.fromfile(inFile,count=nElements,dtype=dtype)
+        dtype = grid.hierarchy._dtype
+        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
+        read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
         field = field.reshape(grid.ActiveDimensions, order='F')
 
-        # we can/should also check against the max and min in the header file
-
-        inFile.close()
         return field
 
 class IOHandlerOrion(IOHandlerBoxlib):

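The seek arithmetic in the new _read_data is worth spelling out: within a
FAB, all components are stored back to back, so a given field starts
nElements * bytesPerReal * field_index bytes past the FAB header.  Below is
a plain-Python sketch of the same read, standing in for the read_and_seek C
helper; the header handling mirrors the removed pure-Python path above.

    import os
    import numpy as np

    def read_fab_field(filename, fab_offset, field_index, dims, dtype):
        # fab_offset points at the FAB header line inside the data file;
        # component k begins nelements * itemsize * k bytes after it.
        nelements = int(np.prod(dims))
        with open(os.path.expanduser(filename), "rb") as f:
            f.seek(fab_offset)
            f.readline()  # skip the FAB header line
            f.seek(nelements * np.dtype(dtype).itemsize * field_index, 1)
            data = np.fromfile(f, dtype=dtype, count=nelements)
        return data.reshape(dims, order="F")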

https://bitbucket.org/yt_analysis/yt-3.0/commits/88199784b19d/
Changeset:   88199784b19d
Branch:      yt
User:        MatthewTurk
Date:        2013-05-16 23:04:14
Summary:     Removing BoxlibLevel.
Affected #:  1 file

diff -r db3431380b9c83d64b81f97fc6dc72799c6d0779 -r 88199784b19d6129960f4180525bc8f8fd3dccc7 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -295,13 +295,6 @@
         dd = self._get_data_reader_dict()
         AMRHierarchy._setup_classes(self, dd)
 
-class BoxlibLevel:
-    def __init__(self,level,ngrids):
-        self.level = level
-        self.ngrids = ngrids
-        self.grids = []
-    
-
 class BoxlibStaticOutput(StaticOutput):
     """
     This class is a stripped down class that simply reads and parses


https://bitbucket.org/yt_analysis/yt-3.0/commits/6cf2fc77351f/
Changeset:   6cf2fc77351f
Branch:      yt
User:        MatthewTurk
Date:        2013-05-16 23:06:59
Summary:     Removing unused regexes.
Affected #:  1 file

diff -r 88199784b19d6129960f4180525bc8f8fd3dccc7 -r 6cf2fc77351f326cb07cf311deddc3f43263edae yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -57,9 +57,6 @@
 from .io import IOHandlerBoxlib
 
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
-_file_finder = re.compile(r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n")
-_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
-_data_files_finder = re.compile(r"Level_[\d]/")
 
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0


https://bitbucket.org/yt_analysis/yt-3.0/commits/5fce089a2ccb/
Changeset:   5fce089a2ccb
Branch:      yt
User:        MatthewTurk
Date:        2013-05-16 23:08:44
Summary:     Oops, I spoke too soon, this one *is* used.
Affected #:  1 file

diff -r 6cf2fc77351f326cb07cf311deddc3f43263edae -r 5fce089a2ccbc5b6aa2d7a5c75e39d98ad77ceec yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -57,6 +57,7 @@
 from .io import IOHandlerBoxlib
 
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
 
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0

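Indeed it is: _dim_finder is what turns a Cell_H extent line into start and
stop indices in _parse_hierarchy.  A tiny demonstration, with made-up
extent values:

    import re
    import numpy as np

    _dim_finder = re.compile(
        r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
    line = "((0,0,0) (31,31,31) (0,0,0))"
    start, stop = _dim_finder.match(line).groups()
    start = np.array(start.split(","), dtype="int64")
    stop = np.array(stop.split(","), dtype="int64")
    print stop - start + 1   # [32 32 32] -- the grid dimensions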

https://bitbucket.org/yt_analysis/yt-3.0/commits/f027929193b7/
Changeset:   f027929193b7
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 15:24:56
Summary:     Making the inputs file optional and getting domain dimensions from the header.

Also, note that this changes the unique_identifier to the Header file rather
than the inputs file.  It should also handle situations where refinement
factors are specified as a list, or not specified at all.
Affected #:  1 file

diff -r 5fce089a2ccbc5b6aa2d7a5c75e39d98ad77ceec -r f027929193b72b692a323176e86d83e231953ec0 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -370,8 +370,11 @@
         if os.path.isfile(self.fparameter_filename):
             self._parse_probin()
         # Let's read the file
-        self.unique_identifier = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
+        hfn = os.path.join(self.fullplotdir, 'Header')
+        self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
+        # the 'inputs' file is now optional
+        if not os.path.exists(self.parameter_filename):
+            return
         for line in (line.split("#")[0].strip() for line in
                      open(self.parameter_filename)):
             if len(line) == 0: continue
@@ -468,10 +471,30 @@
                                          dtype="float64")
         self.domain_right_edge = np.array(header_file.readline().split(),
                                          dtype="float64")
-        self.refine_by = int(header_file.readline())
-        # We now skip over global index space and timesteps per level
-        for i in range(2):
-            header_file.readline()
+        ref_factors = np.array([int(i) for i in
+                                header_file.readline().split()])
+        if ref_factors.size == 0:
+            # We use a default of two, as Nyx doesn't always output this value
+            ref_factors = [2]
+        # We can't vary refinement factors based on dimension, or whatever else
+        # they might be varied on.  Curiously, I found that some Castro 3D
+        # data has only two refinement factors, which I don't know how to
+        # interpret.
+        assert(np.unique(ref_factors).size == 1)
+        self.refine_by = ref_factors[0]
+        # Now we read the global index space, to get the domain dimensions.
+        index_space = header_file.readline()
+        # This will be of the form:
+        #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
+        # So note that if we split it all up based on spaces, we should be
+        # fine, as long as we take the first two entries, which correspond to
+        # the root level.  I'm not 100% pleased with this solution.
+        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        start = np.array(root_space[0].split(","), dtype="int64")
+        stop = np.array(root_space[1].split(","), dtype="int64")
+        self.domain_dimensions = stop - start + 1
+        # Skip timesteps per level
+        header_file.readline()
         self._header_mesh_start = header_file.tell()
 
     def _set_units(self):
@@ -480,8 +503,6 @@
         """
         self.units = {}
         self.time_units = {}
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
         self._setup_nounits_units()
         self.conversion_factors = defaultdict(lambda: 1.0)
         self.time_units['1'] = 1

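To make the string munging concrete, here is the index-space line from the
comment above run through the same parsing:

    import numpy as np

    index_space = ("((0,0,0) (255,255,255) (0,0,0)) "
                   "((0,0,0) (511,511,511) (0,0,0))")
    # Strip parentheses; the first two tokens describe the root level.
    root_space = index_space.replace("(", "").replace(")", "").split()[:2]
    start = np.array(root_space[0].split(","), dtype="int64")
    stop = np.array(root_space[1].split(","), dtype="int64")
    print stop - start + 1   # [256 256 256]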

https://bitbucket.org/yt_analysis/yt-3.0/commits/cd09622412e9/
Changeset:   cd09622412e9
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 16:47:28
Summary:     Clean up endianness caching and some of IO.
Affected #:  3 files

diff -r f027929193b72b692a323176e86d83e231953ec0 -r cd09622412e92d7ab1501905406463093e19db29 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -48,16 +48,22 @@
 
 from .definitions import \
     orion2enzoDict, \
-    parameterDict, \
-    FAB_header_pattern
+    parameterDict
 from .fields import \
     OrionFieldInfo, \
     add_orion_field, \
     KnownOrionFields
 from .io import IOHandlerBoxlib
 
+# This is what we use to find scientific notation that might include d's
+# instead of e's.
 _scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+# This is the dimensions in the Cell_H file for each level
 _dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# This is the line that prefixes each set of data for a FAB in the FAB file
+_header_pattern = re.compile(r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, " +
+                             r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
+                             "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
 
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
@@ -196,23 +202,20 @@
         with different endian processors, then you're on your own!
         """
         # open the test file & grab the header
-        inFile = open(os.path.expanduser(test_grid.filename),'rb')
-        header = inFile.readline()
-        inFile.close()
-        header.strip()
+        with open(os.path.expanduser(test_grid.filename), 'rb') as f:
+            header = f.readline()
         
-        headerRe = re.compile(FAB_header_pattern)
-        bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
-        self._bytesPerReal = int(bytesPerReal)
-        if self._bytesPerReal == int(endian[0]):
-            dtype = '<'
-        elif self._bytesPerReal == int(endian[-1]):
-            dtype = '>'
+        bpr, endian, start, stop, centering, nc = \
+            _header_pattern.search(header).groups()
+        if bpr == endian[0]:
+            dtype = '<f%s' % bpr
+        elif bpr == endian[-1]:
+            dtype = '>f%s' % bpr
         else:
             raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
 
-        dtype += ('f%i' % self._bytesPerReal) # always a floating point
-        self._dtype = dtype
+        mylog.debug("FAB header suggests dtype of %s", dtype)
+        self._dtype = np.dtype(dtype)
 
     def __calculate_grid_dimensions(self, start_stop):
         start = np.array(map(int,start_stop[0].split(',')))

diff -r f027929193b72b692a323176e86d83e231953ec0 -r cd09622412e92d7ab1501905406463093e19db29 yt/frontends/boxlib/definitions.py
--- a/yt/frontends/boxlib/definitions.py
+++ b/yt/frontends/boxlib/definitions.py
@@ -71,4 +71,3 @@
                   "comoving_h": "CosmologyHubbleConstantNow"
                   }
 
-FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"

diff -r f027929193b72b692a323176e86d83e231953ec0 -r cd09622412e92d7ab1501905406463093e19db29 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -37,9 +37,6 @@
     def modify(self, field):
         return field.swapaxes(0,2)
 
-        def read(line, field):
-            return float(line.split(' ')[index[field]])
-
     def _read_data(self, grid, field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.
@@ -48,18 +45,14 @@
 
         filen = os.path.expanduser(grid.filename)
         offset1 = grid._offset
-        # one field has nElements * bytesPerReal bytes and is located
-        # nElements * bytesPerReal * field_index from the offset location
-        bytesPerReal = grid.hierarchy._bytesPerReal
+        dtype = grid.hierarchy._dtype
+        bpr = grid.hierarchy._dtype.itemsize
+        ne = grid.ActiveDimensions.prod()
+        field_index = grid.hierarchy.field_indexes[field]
+        offset2 = int(ne*bpr*field_index)
 
-        field_index = grid.hierarchy.field_indexes[field]
-        nElements = grid.ActiveDimensions.prod()
-        offset2 = int(nElements*bytesPerReal*field_index)
-
-        dtype = grid.hierarchy._dtype
-        field = np.empty(nElements, dtype=grid.hierarchy._dtype)
-        read_and_seek(filen, offset1, offset2, field, nElements * bytesPerReal)
-        field = field.reshape(grid.ActiveDimensions, order='F')
+        field = np.empty(grid.ActiveDimensions, dtype=dtype, order='F')
+        read_and_seek(filen, offset1, offset2, field, ne * bpr)
 
         return field
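
The rewritten _read_data above finds a single field inside a packed
multiFAB by pure offset arithmetic: each field occupies ne * bpr bytes, so
field number i starts ne * bpr * i bytes past the FAB's data offset.  A
minimal NumPy-only sketch of the same arithmetic (the frontend itself uses
the compiled read_and_seek helper; the function name here is hypothetical):

    import numpy as np

    def read_fab_field(filename, fab_offset, field_index, dims, dtype):
        # dims is grid.ActiveDimensions; dtype is the cached,
        # endian-aware dtype parsed from the FAB header.
        ne = int(np.prod(dims))            # elements per field
        bpr = np.dtype(dtype).itemsize     # bytes per real
        with open(filename, "rb") as f:
            # skip over the field_index preceding fields
            f.seek(fab_offset + ne * bpr * field_index)
            data = np.fromfile(f, dtype=dtype, count=ne)
        # BoxLib stores grid data column-major on disk
        return data.reshape(dims, order="F")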
 


https://bitbucket.org/yt_analysis/yt-3.0/commits/360e8aecd55f/
Changeset:   360e8aecd55f
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 19:40:23
Summary:     Make it clearer why we choose the endianness we do.  Remove parameters.
Affected #:  1 file

diff -r cd09622412e92d7ab1501905406463093e19db29 -r 360e8aecd55f0744023a99bf8dfea87afa6f91a7 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -61,7 +61,7 @@
 # This is the dimensions in the Cell_H file for each level
 _dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
 # This is the line that prefixes each set of data for a FAB in the FAB file
-_header_pattern = re.compile(r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, " +
+_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
                              r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
                              "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
 
@@ -207,6 +207,12 @@
         
         bpr, endian, start, stop, centering, nc = \
             _header_pattern.search(header).groups()
+        # Note that previously we were using a different value for BPR than we
+        # use now.  Here is an example set of information directly from BoxLib:
+        #  * DOUBLE data
+        #  * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27
+        #  * FLOAT data
+        #  * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
         if bpr == endian[0]:
             dtype = '<f%s' % bpr
         elif bpr == endian[-1]:
@@ -321,20 +327,14 @@
         self.fparameter_filename = fparamFilename
         self.__ipfn = paramFilename
 
-        self.fparameters = {}
-
         StaticOutput.__init__(self, plotname.rstrip("/"),
                               data_style='boxlib_native')
 
         # These should maybe not be hardcoded?
-        self.parameters["HydroMethod"] = 'orion' # always PPM DE
+        self.parameters["HydroMethod"] = 'boxlib'
         self.parameters["Time"] = 1. # default unit is 1...
-        self.parameters["DualEnergyFormalism"] = 0 # always off.
         self.parameters["EOSType"] = -1 # default
 
-        if self.fparameters.has_key("mu"):
-            self.parameters["mu"] = self.fparameters["mu"]
-
     def _localize(self, f, default):
         if f is None:
             return os.path.join(self.directory, default)
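
To make the regex change concrete: bytes-per-real now comes from the
*second* parenthesized group, so the DOUBLE and FLOAT headers quoted above
parse to 8- and 4-byte dtypes respectively.  A standalone sketch of the
same logic (not the frontend's exact code):

    import re
    import numpy as np

    _header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), "
                                 r"\(([0-9 ]+)\)\)\)")

    def fab_dtype(header):
        bpr, ordering = _header_pattern.search(header).groups()
        ordering = ordering.split()
        if bpr == ordering[0]:       # first byte == bpr -> little endian
            return np.dtype("<f%s" % bpr)
        elif bpr == ordering[-1]:    # last byte == bpr -> big endian
            return np.dtype(">f%s" % bpr)
        raise ValueError("FAB header is neither big nor little endian.")

    double_header = ("FAB ((8, (64 11 52 0 1 12 0 1023)),"
                     "(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27\n")
    float_header = ("FAB ((8, (32 8 23 0 1 9 0 127)),"
                    "(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27\n")
    print(fab_dtype(double_header).itemsize)  # 8 bytes per real
    print(fab_dtype(float_header).itemsize)   # 4 bytes per real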


https://bitbucket.org/yt_analysis/yt-3.0/commits/b96267de4777/
Changeset:   b96267de4777
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 20:51:32
Summary:     Adding is_valid methods.  Maestro identification is still not quite working.
Affected #:  1 file

diff -r 360e8aecd55f0744023a99bf8dfea87afa6f91a7 -r b96267de477792261ce0f63cb52a899fb5685a19 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -65,6 +65,8 @@
                              r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
                              "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
 
+
+
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
 
@@ -108,7 +110,7 @@
     grid = BoxlibGrid
     def __init__(self, pf, data_style='boxlib_native'):
         self.data_style = data_style
-        self.header_filename = os.path.join(pf.fullplotdir, 'Header')
+        self.header_filename = os.path.join(pf.output_dir, 'Header')
 
         AMRHierarchy.__init__(self, pf, data_style)
         self._cache_endianness(self.grids[-1])
@@ -148,7 +150,7 @@
                 self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
                 self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
             # Now we get to the level header filename, which we open and parse.
-            fn = os.path.join(self.parameter_file.fullplotdir,
+            fn = os.path.join(self.parameter_file.output_dir,
                               header_file.next().strip())
             level_header_file = open(fn + "_H")
             level_dir = os.path.dirname(fn)
@@ -310,9 +312,12 @@
     _hierarchy_class = BoxlibHierarchy
     _fieldinfo_fallback = OrionFieldInfo
     _fieldinfo_known = KnownOrionFields
+    _output_prefix = None
 
-    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
-                 data_style='boxlib_native', paranoia=False,
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='boxlib_native',
                  storage_filename = None):
         """
         The paramfile is usually called "inputs"
@@ -321,65 +326,64 @@
         as per BoxLib, data_style will be Native (implemented here), IEEE (not
         yet implemented) or ASCII (not yet implemented.)
         """
+        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
+        self.cparam_filename = self._localize_check(cparam_filename)
+        self.fparam_filename = self._localize_check(fparam_filename)
         self.storage_filename = storage_filename
-        self.paranoid_read = paranoia
-        self.parameter_filename = paramFilename
-        self.fparameter_filename = fparamFilename
-        self.__ipfn = paramFilename
 
-        StaticOutput.__init__(self, plotname.rstrip("/"),
-                              data_style='boxlib_native')
+        StaticOutput.__init__(self, output_dir, data_style='boxlib_native')
 
-        # These should maybe not be hardcoded?
+        # These are still used in a few places.
         self.parameters["HydroMethod"] = 'boxlib'
         self.parameters["Time"] = 1. # default unit is 1...
         self.parameters["EOSType"] = -1 # default
 
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
+    def _localize_check(self, fn):
+        # If the file exists, use it.  If not, set it to None.
+        full_fn = os.path.join(self.output_dir, fn)
+        if os.path.exists(full_fn):
+            return full_fn
+        return None
 
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args
-        pname = args[0].rstrip("/")
-        dn = os.path.dirname(pname)
-        if len(args) > 1: kwargs['paramFilename'] = args[1]
-        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename) and \
+           not os.path.exists(jobinfo_filename):
+            return True # We have no parameters to go off of
+        # If we do have either inputs or jobinfo, we should be deferring to a
+        # different frontend.
+        return False
 
-        # We check for the job_info file's existence because this is currently
-        # what distinguishes Orion data from MAESTRO data.
-        pfn = os.path.join(pfname)
-        if not os.path.exists(pfn): return False
-        castro = any(("castro." in line for line in open(pfn)))
-        nyx = any(("nyx." in line for line in open(pfn)))
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        really_orion = any(("geometry.prob_lo" in line for line in open(pfn)))
-        orion = (not castro) and (not maestro) and (not nyx) and really_orion
-        return orion
-        
     def _parse_parameter_file(self):
         """
         Parses the parameter file and establishes the various
         dictionaries.
         """
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
         self._parse_header_file()
-        self.parameter_filename = self._localize(
-                self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(
-                self.fparameter_filename, 'probin')
-        if os.path.isfile(self.fparameter_filename):
-            self._parse_probin()
         # Let's read the file
-        hfn = os.path.join(self.fullplotdir, 'Header')
+        hfn = os.path.join(self.output_dir, 'Header')
         self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
         # the 'inputs' file is now optional
-        if not os.path.exists(self.parameter_filename):
+        self._parse_cparams()
+        self._parse_fparams()
+
+    def _parse_cparams(self):
+        if self.cparam_filename is None:
             return
         for line in (line.split("#")[0].strip() for line in
-                     open(self.parameter_filename)):
+                     open(self.cparam_filename)):
             if len(line) == 0: continue
             param, vals = [s.strip() for s in line.split("=")]
             if param == "amr.n_cell":
@@ -389,7 +393,7 @@
             elif param == "Prob.lo_bc":
                 vals = self.periodicity = ensure_tuple([i == 0 for i in vals.split()])
             elif param == "castro.use_comoving":
-                vals = self.cosmological_simulation = bool(vals)
+                vals = self.cosmological_simulation = int(vals)
             else:
                 # Now we guess some things about the parameter and its type
                 v = vals.split()[0] # Just in case there are multiple; we'll go
@@ -408,12 +412,11 @@
                 if len(vals) == 1: vals = vals[0]
             self.parameters[param] = vals
 
-        if self.parameters.has_key("ComovingCoordinates") and bool(self.parameters["ComovingCoordinates"]):
-            self.cosmological_simulation = 1
+        if self.cosmological_simulation == 1:
             self.omega_lambda = self.parameters["comoving_OmL"]
             self.omega_matter = self.parameters["comoving_OmM"]
             self.hubble_constant = self.parameters["comoving_h"]
-            a_file = open(os.path.join(self.fullplotdir,'comoving_a'))
+            a_file = open(os.path.join(self.output_dir,'comoving_a'))
             line = a_file.readline().strip()
             a_file.close()
             self.current_redshift = 1/float(line) - 1
@@ -421,13 +424,15 @@
             self.current_redshift = self.omega_lambda = self.omega_matter = \
                 self.hubble_constant = self.cosmological_simulation = 0.0
 
-    def _parse_probin(self):
+    def _parse_fparams(self):
         """
         Parses the fortran parameter file for Orion. Most of this will
         be useless, but this is where it keeps mu = mass per
         particle/m_hydrogen.
         """
-        for line in (l for l in open(self.fparameter_filename) if "=" in l):
+        if self.fparam_filename is None:
+            return
+        for line in (l for l in open(self.fparam_filename) if "=" in l):
             param, vals = [v.strip() for v in line.split("=")]
             # Now, there are a couple different types of parameters.
             # Some will be where you only have floating point values, others
@@ -456,7 +461,7 @@
         # call readline() if we want to end up with an offset at the very end.
         # Fortunately, elsewhere we don't care about the offset, so we're fine
         # everywhere else using iteration exclusively.
-        header_file = open(os.path.join(self.fullplotdir,'Header'))
+        header_file = open(os.path.join(self.output_dir,'Header'))
         self.orion_version = header_file.readline().rstrip()
         n_fields = int(header_file.readline())
 
@@ -533,6 +538,33 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+class OrionStaticOutput(BoxlibStaticOutput):
+    
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
+
 class OrionHierarchy(BoxlibHierarchy):
     def _read_particles(self):
         """
@@ -546,7 +578,7 @@
         self.grid_particle_count = np.zeros(len(self.grids))
 
         for particle_filename in ["StarParticles", "SinkParticles"]:
-            fn = os.path.join(self.pf.fullplotdir, particle_filename)
+            fn = os.path.join(self.pf.output_dir, particle_filename)
             if os.path.exists(fn): self._read_particle_file(fn)
 
     def _read_particle_file(self, fn):
@@ -580,3 +612,24 @@
                     self.grids[ind].NumberOfParticles += 1
         return True
                 
+
+class CastroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        if not os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
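
The new _parse_cparams above reads the (now optional) inputs file as plain
"key = value" lines, stripping "#" comments first.  A minimal sketch of
that parse with the type guessing omitted (as in the frontend, each kept
line is assumed to contain a single "="):

    def parse_cparams(path):
        parameters = {}
        for line in (l.split("#")[0].strip() for l in open(path)):
            if len(line) == 0:
                continue
            param, vals = [s.strip() for s in line.split("=")]
            parameters[param] = vals
        return parameters

    # e.g. the line "amr.n_cell = 64 64 64  # base grid" ends up as
    # parameters["amr.n_cell"] == "64 64 64"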


https://bitbucket.org/yt_analysis/yt-3.0/commits/df481f0f2226/
Changeset:   df481f0f2226
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 20:59:38
Summary:     Can distinguish (correctly, if somewhat fragilely) between Boxlib types.

Nyx not yet implemented.
Affected #:  1 file

diff -r b96267de477792261ce0f63cb52a899fb5685a19 -r df481f0f2226b876b3349908f9f63f4300a2e057 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -340,7 +340,8 @@
 
     def _localize_check(self, fn):
         # If the file exists, use it.  If not, set it to None.
-        full_fn = os.path.join(self.output_dir, fn)
+        root_dir = os.path.dirname(self.output_dir)
+        full_fn = os.path.join(root_dir, fn)
         if os.path.exists(full_fn):
             return full_fn
         return None
@@ -412,7 +413,7 @@
                 if len(vals) == 1: vals = vals[0]
             self.parameters[param] = vals
 
-        if self.cosmological_simulation == 1:
+        if getattr(self, "cosmological_simulation", 0) == 1:
             self.omega_lambda = self.parameters["comoving_OmL"]
             self.omega_matter = self.parameters["comoving_OmM"]
             self.hubble_constant = self.parameters["comoving_h"]
@@ -626,10 +627,41 @@
             return False
         args = inspect.getcallargs(cls.__init__, args, kwargs)
         # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
         if not os.path.exists(jobinfo_filename):
             return False
+        if os.path.exists(fparam_filename):
+            return False
         # Now we check for all the others
         lines = open(jobinfo_filename).readlines()
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+class MaestroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if not os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        # Maestro outputs have "Castro" in them
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
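
Taken together, these _is_valid methods tell the Boxlib flavors apart by
which sidecar files sit next to the plotfile directory.  A schematic
sketch of the decision logic (simplified: the real methods resolve the
cparam/fparam names via inspect.getcallargs and also check file contents,
e.g. the "Castro   " banner in job_info and "geometry.prob_lo" in inputs):

    import os

    def classify(output_dir, cparam="inputs", fparam="probin"):
        if not os.path.exists(os.path.join(output_dir, "Header")):
            return None                   # not a Boxlib plotfile at all
        root = os.path.dirname(os.path.abspath(output_dir))
        has_inputs = os.path.exists(os.path.join(root, cparam))
        has_probin = os.path.exists(os.path.join(root, fparam))
        has_jobinfo = os.path.exists(os.path.join(output_dir, "job_info"))
        if not has_inputs and not has_jobinfo:
            return "Boxlib"               # generic fallback frontend
        if has_jobinfo:
            # Maestro outputs also carry the Castro banner; the probin
            # file is what separates the two.
            return "Maestro" if has_probin else "Castro"
        return "Orion"                    # inputs present, no job_info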


https://bitbucket.org/yt_analysis/yt-3.0/commits/a6081776f533/
Changeset:   a6081776f533
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 21:48:50
Summary:     Missed removing this unused routine.
Affected #:  1 file

diff -r df481f0f2226b876b3349908f9f63f4300a2e057 -r a6081776f533c1f11ed0f14b37d03419e0a386c4 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -225,12 +225,6 @@
         mylog.debug("FAB header suggests dtype of %s", dtype)
         self._dtype = np.dtype(dtype)
 
-    def __calculate_grid_dimensions(self, start_stop):
-        start = np.array(map(int,start_stop[0].split(',')))
-        stop = np.array(map(int,start_stop[1].split(',')))
-        dimension = stop - start + 1
-        return dimension,start,stop
-        
     def _populate_grid_objects(self):
         mylog.debug("Creating grid objects")
         self.grids = np.array(self.grids, dtype='object')


https://bitbucket.org/yt_analysis/yt-3.0/commits/6cb70d0e1b7f/
Changeset:   6cb70d0e1b7f
Branch:      yt
User:        MatthewTurk
Date:        2013-05-17 22:20:43
Summary:     Addressing Andrew's comments and fixing an indentation error.
Affected #:  1 file

diff -r a6081776f533c1f11ed0f14b37d03419e0a386c4 -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -111,6 +111,7 @@
     def __init__(self, pf, data_style='boxlib_native'):
         self.data_style = data_style
         self.header_filename = os.path.join(pf.output_dir, 'Header')
+        self.directory = pf.output_dir
 
         AMRHierarchy.__init__(self, pf, data_style)
         self._cache_endianness(self.grids[-1])
@@ -193,7 +194,7 @@
                 self.grids.append(go)
             grid_counter += ngrids
             # already read the filenames above...
-            self.float_type = 'float64'
+        self.float_type = 'float64'
 
     def _cache_endianness(self,test_grid):
         """
@@ -399,7 +400,7 @@
                     pcast = str
                 else:
                     syms = (".", "D+", "D-", "E+", "E-")
-                    if any(sym in v for sym in syms for v in vals.split()):
+                    if any(sym in v.upper() for sym in syms for v in vals.split()):
                         pcast = float
                     else:
                         pcast = int
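
The v.upper() fix matters because Fortran-style exponents are often
written in lower case: without it, a value like "1d+00" contains none of
the (upper-case) markers and would be mis-typed as an int.  A small
illustration:

    syms = (".", "D+", "D-", "E+", "E-")

    def guess_cast(vals):
        if any(sym in v.upper() for sym in syms for v in vals.split()):
            return float
        return int

    print(guess_cast("1d+00 2d+00"))  # float: "D+" found after upper()
    print(guess_cast("16 32"))        # int: no decimal point or exponent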


https://bitbucket.org/yt_analysis/yt-3.0/commits/6718ba4280eb/
Changeset:   6718ba4280eb
Branch:      yt
User:        Andrew Myers
Date:        2013-05-17 23:23:42
Summary:     Changes to the new Boxlib frontend to get particles working in Orion.
Affected #:  3 files

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -326,7 +326,7 @@
         self.fparam_filename = self._localize_check(fparam_filename)
         self.storage_filename = storage_filename
 
-        StaticOutput.__init__(self, output_dir, data_style='boxlib_native')
+        StaticOutput.__init__(self, output_dir, data_style)
 
         # These are still used in a few places.
         self.parameters["HydroMethod"] = 'boxlib'
@@ -534,34 +534,13 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
-class OrionStaticOutput(BoxlibStaticOutput):
+class OrionHierarchy(BoxlibHierarchy):
     
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        output_dir = args[0]
-        header_filename = os.path.join(output_dir, "Header")
-        jobinfo_filename = os.path.join(output_dir, "job_info")
-        if not os.path.exists(header_filename):
-            # We *know* it's not boxlib if Header doesn't exist.
-            return False
-        args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow
-        inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
-        if not os.path.exists(inputs_filename):
-            return False
-        if os.path.exists(jobinfo_filename):
-            return False
-        # Now we check for all the others
-        lines = open(inputs_filename).readlines()
-        if any(("castro." in line for line in lines)): return False
-        if any(("nyx." in line for line in lines)): return False
-        if any(("geometry.prob_lo" in line for line in lines)): return True
-        return False
+    def __init__(self, pf, data_style='orion_native'):
+        BoxlibHierarchy.__init__(self, pf, data_style)
+        self._read_particles()
+        #self.io = IOHandlerOrion
 
-class OrionHierarchy(BoxlibHierarchy):
     def _read_particles(self):
         """
         reads in particles and assigns them to grids. Will search for
@@ -608,6 +587,43 @@
                     self.grids[ind].NumberOfParticles += 1
         return True
                 
+class OrionStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = OrionHierarchy
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='orion_native',
+                 storage_filename = None):
+
+        BoxlibStaticOutput.__init__(self, output_dir,
+                 cparam_filename, fparam_filename, data_style)
+          
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args                                                                               
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.                                      
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow                                                     
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others                                                             
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
 
 class CastroStaticOutput(BoxlibStaticOutput):
 

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -81,7 +81,10 @@
                  'particle_burnstate': 14,
                  'particle_id': 15}
 
-        fn = grid.pf.fullplotdir + "/StarParticles"
+        def read(line, field):
+            return float(line.split(' ')[index[field]])
+
+        fn = grid.pf.output_dir + "/StarParticles"
         with open(fn, 'r') as f:
             lines = f.readlines()
             particles = []

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 6718ba4280eb0a5439fc82b7377919b1ec51ef65 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -74,14 +74,14 @@
     EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
-from yt.frontends.castro.api import \
-    CastroStaticOutput, CastroFieldInfo, add_castro_field
+# from yt.frontends.castro.api import \
+#     CastroStaticOutput, CastroFieldInfo, add_castro_field
 
-from yt.frontends.nyx.api import \
-    NyxStaticOutput, NyxFieldInfo, add_nyx_field
+# from yt.frontends.nyx.api import \
+#     NyxStaticOutput, NyxFieldInfo, add_nyx_field
 
-from yt.frontends.orion.api import \
-    OrionStaticOutput, OrionFieldInfo, add_orion_field
+# from yt.frontends.orion.api import \
+#     OrionStaticOutput, OrionFieldInfo, add_orion_field
 
 from yt.frontends.flash.api import \
     FLASHStaticOutput, FLASHFieldInfo, add_flash_field
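
For reference, the read helper restored in io.py above pulls a named
column out of one whitespace-separated StarParticles record via the index
map.  A standalone sketch with a trimmed column map (positions here are
illustrative; io.py defines the full map):

    index = {'particle_position_x': 1,
             'particle_position_y': 2,
             'particle_id': 15}

    def read(line, field):
        return float(line.split(' ')[index[field]])

    record = "1 0.25 0.75 " + "0 " * 12 + "42"   # fabricated record
    print(read(record, 'particle_position_x'))   # 0.25
    print(read(record, 'particle_id'))           # 42.0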


https://bitbucket.org/yt_analysis/yt-3.0/commits/02f36f34a525/
Changeset:   02f36f34a525
Branch:      yt
User:        MatthewTurk
Date:        2013-09-25 20:30:35
Summary:     Merging from mainline yt development.
Affected #:  462 files

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,17 @@
 juxtaposicion at gmail.com = cemoody at ucsc.edu
 chummels at gmail.com = chummels at astro.columbia.edu
 jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
 sam.skillman at gmail.com = samskillman at gmail.com
 casey at thestarkeffect.com = caseywstark at gmail.com
 chiffre = chiffre at posteo.de
 Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu
+jcforbes at ucsc.edu = jforbes at ucolick.org

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,6 +12,7 @@
 yt/utilities/kdtree/forthonf2c.h
 yt/utilities/libconfig_wrapper.c
 yt/utilities/spatial/ckdtree.c
+yt/utilities/lib/amr_kdtools.c
 yt/utilities/lib/CICDeposit.c
 yt/utilities/lib/ContourFinding.c
 yt/utilities/lib/DepthFirstOctree.c
@@ -32,6 +33,7 @@
 yt/utilities/lib/GridTree.c
 yt/utilities/lib/marching_cubes.c
 yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
 syntax: glob
 *.pyc
 .*.swp

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5156,3 +5156,4 @@
 0000000000000000000000000000000000000000 mpi-opaque
 f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
 0000000000000000000000000000000000000000 hop callback
+079e456c38a87676472a458210077e2be325dc85 last_gplv3

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 CITATION
--- /dev/null
+++ b/CITATION
@@ -0,0 +1,31 @@
+To cite yt in publications, please use:
+
+Turk, M. J., Smith, B. D., Oishi, J. S., et al. 2011, ApJS, 192, 9
+
+In the body of the text, please add a footnote to the yt webpage:
+
+http://yt-project.org/
+
+For LaTex and BibTex users:
+
+\bibitem[Turk et al.(2011)]{2011ApJS..192....9T} Turk, M.~J., Smith, B.~D.,
+Oishi, J.~S., et al.\ 2011, \apjs, 192, 9
+
+ at ARTICLE{2011ApJS..192....9T,
+   author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
+{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
+    title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
+  journal = {\apjs},
+archivePrefix = "arXiv",
+   eprint = {1011.3514},
+ primaryClass = "astro-ph.IM",
+ keywords = {cosmology: theory, methods: data analysis, methods: numerical},
+     year = 2011,
+    month = jan,
+   volume = 192,
+      eid = {9},
+    pages = {9},
+      doi = {10.1088/0067-0049/192/1/9},
+   adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
+  adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 COPYING.txt
--- /dev/null
+++ b/COPYING.txt
@@ -0,0 +1,81 @@
+===============================
+ The yt project licensing terms
+===============================
+
+yt is licensed under the terms of the Modified BSD License (also known as New
+or Revised BSD), as follows:
+
+Copyright (c) 2013-, yt Development Team
+Copyright (c) 2006-2013, Matthew Turk <matthewturk at gmail.com>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+Neither the name of the yt Development Team nor the names of its
+contributors may be used to endorse or promote products derived from this
+software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+About the yt Development Team
+-----------------------------
+
+Matthew Turk began yt in 2006 and remains the project lead.  Over time yt has
+grown to include contributions from a large number of individuals from many
+diverse institutions, scientific, and technical backgrounds.
+
+Until the fall of 2013, yt was licensed under the GPLv3.  However, with consent
+from all developers and on a public mailing list, yt has been relicensed under
+the BSD 3-clause under a shared copyright model.  For more information, see:
+http://lists.spacepope.org/pipermail/yt-dev-spacepope.org/2013-July/003239.html
+All versions of yt prior to this licensing change are available under the
+GPLv3; all subsequent versions are available under the BSD 3-clause license.
+
+The yt Development Team is the set of all contributors to the yt project.  This
+includes all of the yt subprojects.
+
+The core team that coordinates development on BitBucket can be found here:
+http://bitbucket.org/yt_analysis/ 
+
+
+Our Copyright Policy
+--------------------
+
+yt uses a shared copyright model. Each contributor maintains copyright
+over their contributions to yt. But, it is important to note that these
+contributions are typically only changes to the repositories. Thus, the yt
+source code, in its entirety is not the copyright of any single person or
+institution.  Instead, it is the collective copyright of the entire yt
+Development Team.  If individual contributors want to maintain a record of what
+changes/contributions they have specific copyright on, they should indicate
+their copyright in the commit message of the change, when they commit the
+change to one of the yt repositories.
+
+With this in mind, the following banner should be used in any source code file
+to indicate the copyright and license terms:
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,51 +1,55 @@
-YT is a group effort.
+yt is a group effort.
 
-Contributors:                   Tom Abel (tabel at stanford.edu)
-				David Collins (dcollins at physics.ucsd.edu)
-				Brian Crosby (crosby.bd at gmail.com)
-				Andrew Cunningham (ajcunn at gmail.com)
-				Nathan Goldbaum (goldbaum at ucolick.org)
-				Markus Haider (markus.haider at uibk.ac.at)
-				Cameron Hummels (chummels at gmail.com)
-				Christian Karch (chiffre at posteo.de)
-				Ji-hoon Kim (me at jihoonkim.org)
-				Steffen Klemer (sklemer at phys.uni-goettingen.de)
-				Kacper Kowalik (xarthisius.kk at gmail.com)
-				Michael Kuhlen (mqk at astro.berkeley.edu)
-				Eve Lee (elee at cita.utoronto.ca)
-				Yuan Li (yuan at astro.columbia.edu)
-				Chris Malone (chris.m.malone at gmail.com)
-				Josh Maloney (joshua.moloney at colorado.edu)
-				Chris Moody (cemoody at ucsc.edu)
-				Andrew Myers (atmyers at astro.berkeley.edu)
-				Jeff Oishi (jsoishi at gmail.com)
-				Jean-Claude Passy (jcpassy at uvic.ca)
-				Mark Richardson (Mark.L.Richardson at asu.edu)
-				Thomas Robitaille (thomas.robitaille at gmail.com)
-				Anna Rosen (rosen at ucolick.org)
-				Anthony Scopatz (scopatz at gmail.com)
-				Devin Silvia (devin.silvia at colorado.edu)
-				Sam Skillman (samskillman at gmail.com)
-				Stephen Skory (s at skory.us)
-				Britton Smith (brittonsmith at gmail.com)
-				Geoffrey So (gsiisg at gmail.com)
-				Casey Stark (caseywstark at gmail.com)
-				Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
-				Stephanie Tonnesen (stonnes at gmail.com)
-				Matthew Turk (matthewturk at gmail.com)
-				Rich Wagner (rwagner at physics.ucsd.edu)
-				John Wise (jwise at physics.gatech.edu)
-				John ZuHone (jzuhone at gmail.com)
+Contributors:   
+                Tom Abel (tabel at stanford.edu)
+                David Collins (dcollins at physics.ucsd.edu)
+                Brian Crosby (crosby.bd at gmail.com)
+                Andrew Cunningham (ajcunn at gmail.com)
+                Hilary Egan (hilaryye at gmail.com)
+                John Forces (jforbes at ucolick.org)
+                Nathan Goldbaum (goldbaum at ucolick.org)
+                Markus Haider (markus.haider at uibk.ac.at)
+                Cameron Hummels (chummels at gmail.com)
+                Christian Karch (chiffre at posteo.de)
+                Ji-hoon Kim (me at jihoonkim.org)
+                Steffen Klemer (sklemer at phys.uni-goettingen.de)
+                Kacper Kowalik (xarthisius.kk at gmail.com)
+                Michael Kuhlen (mqk at astro.berkeley.edu)
+                Eve Lee (elee at cita.utoronto.ca)
+                Sam Leitner (sam.leitner at gmail.com)
+                Yuan Li (yuan at astro.columbia.edu)
+                Chris Malone (chris.m.malone at gmail.com)
+                Josh Maloney (joshua.moloney at colorado.edu)
+                Chris Moody (cemoody at ucsc.edu)
+                Andrew Myers (atmyers at astro.berkeley.edu)
+                Jill Naiman (jnaiman at ucolick.org)
+                Kaylea Nelson (kaylea.nelson at yale.edu)
+                Jeff Oishi (jsoishi at gmail.com)
+                Jean-Claude Passy (jcpassy at uvic.ca)
+                Mark Richardson (Mark.L.Richardson at asu.edu)
+                Thomas Robitaille (thomas.robitaille at gmail.com)
+                Anna Rosen (rosen at ucolick.org)
+                Douglas Rudd (drudd at uchicago.edu)
+                Anthony Scopatz (scopatz at gmail.com)
+                Noel Scudder (noel.scudder at stonybrook.edu)
+                Devin Silvia (devin.silvia at colorado.edu)
+                Sam Skillman (samskillman at gmail.com)
+                Stephen Skory (s at skory.us)
+                Britton Smith (brittonsmith at gmail.com)
+                Geoffrey So (gsiisg at gmail.com)
+                Casey Stark (caseywstark at gmail.com)
+                Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+                Stephanie Tonnesen (stonnes at gmail.com)
+                Matthew Turk (matthewturk at gmail.com)
+                Rich Wagner (rwagner at physics.ucsd.edu)
+                Andrew Wetzel (andrew.wetzel at yale.edu)
+                John Wise (jwise at physics.gatech.edu)
+                John ZuHone (jzuhone at gmail.com)
 
-We also include the Delaunay Triangulation module written by Robert Kern of
-Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
+Several items included in the yt/extern directory were written by other
+individuals and may bear their own license, including the progressbar module by
 Nilton Volpato.  The PasteBin interface code (as well as the PasteBin itself)
-was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
-developed by Oliver Hahn.  yt also includes a slightly-modified version of
-libconfig (http://www.hyperrealm.com/libconfig/) and an unmodified version of
-several routines from HEALpix (http://healpix.jpl.nasa.gov/).
-
-Large parts of development of yt were guided by discussions with Tom Abel, Ralf
-Kaehler, Mike Norman and Greg Bryan.
+was written by the Pocoo collective (pocoo.org).  
+developed by Oliver Hahn.  
 
 Thanks to everyone for all your contributions!

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 FUNDING
--- a/FUNDING
+++ /dev/null
@@ -1,35 +0,0 @@
-The development of yt has benefited from funding from many different sources
-and institutions.  Here is an incomplete list of these sources:
-
-  * NSF grant OCI-1048505
-  * NSF grant AST-0239709 
-  * NSF grant AST-0707474
-  * NSF grant AST-0708960
-  * NSF grant AST-0808184
-  * NSF grant AST-0807215 
-  * NSF grant AST-0807312
-  * NSF grant AST-0807075
-  * NSF grant AST-0908199
-  * NSF grant AST-0908553 
-  * NASA grant ATFP NNX08-AH26G
-  * NASA grant ATFP NNX09-AD80G
-  * NASA grant ATFP NNZ07-AG77G
-  * DOE Computational Science Graduate Fellowship under grant number DE-FG02-97ER25308
-
-Additionally, development of yt has benefited from the hospitality and hosting
-of the following institutions:
-
-  * Columbia University
-  * Harvard-Smithsonian Center for Astrophysics
-  * Institute for Advanced Study
-  * Kavli Institute for Particle Astrophysics and Cosmology
-  * Kavli Institute for Theoretical Physics
-  * Los Alamos National Lab
-  * Michigan State University
-  * Princeton University
-  * Stanford University
-  * University of California High-Performance Astro-Computing Center
-  * University of California at Berkeley
-  * University of California at San Diego
-  * University of California at Santa Cruz
-  * University of Colorado at Boulder

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 LICENSE.txt
--- a/LICENSE.txt
+++ /dev/null
@@ -1,674 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 3, 29 June 2007
-
- [... remainder of the standard 674-line GPLv3 text deleted in this commit ...]
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
-  7. Additional Terms.
-
-  "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law.  If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
-  When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it.  (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.)  You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
-  Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
-    a) Disclaiming warranty or limiting liability differently from the
-    terms of sections 15 and 16 of this License; or
-
-    b) Requiring preservation of specified reasonable legal notices or
-    author attributions in that material or in the Appropriate Legal
-    Notices displayed by works containing it; or
-
-    c) Prohibiting misrepresentation of the origin of that material, or
-    requiring that modified versions of such material be marked in
-    reasonable ways as different from the original version; or
-
-    d) Limiting the use for publicity purposes of names of licensors or
-    authors of the material; or
-
-    e) Declining to grant rights under trademark law for use of some
-    trade names, trademarks, or service marks; or
-
-    f) Requiring indemnification of licensors and authors of that
-    material by anyone who conveys the material (or modified versions of
-    it) with contractual assumptions of liability to the recipient, for
-    any liability that these contractual assumptions directly impose on
-    those licensors and authors.
-
-  All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10.  If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term.  If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
-  If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
-  Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
-  8. Termination.
-
-  You may not propagate or modify a covered work except as expressly
-provided under this License.  Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
-  However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
-  Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
-  Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License.  If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
-  9. Acceptance Not Required for Having Copies.
-
-  You are not required to accept this License in order to receive or
-run a copy of the Program.  Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance.  However,
-nothing other than this License grants you permission to propagate or
-modify any covered work.  These actions infringe copyright if you do
-not accept this License.  Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
-  10. Automatic Licensing of Downstream Recipients.
-
-  Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License.  You are not responsible
-for enforcing compliance by third parties with this License.
-
-  An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations.  If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
-  You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License.  For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
-  11. Patents.
-
-  A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based.  The
-work thus licensed is called the contributor's "contributor version".
-
-  A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version.  For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
-  Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
-  In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement).  To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
-  If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients.  "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
-  If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
-  A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License.  You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
-  Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
-  12. No Surrender of Others' Freedom.
-
-  If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all.  For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
-  13. Use with the GNU Affero General Public License.
-
-  Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work.  The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
-  14. Revised Versions of this License.
-
-  The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time.  Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
-  Each version is given a distinguishing version number.  If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation.  If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
-  If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
-  Later license versions may give you additional or different
-permissions.  However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
-  15. Disclaimer of Warranty.
-
-  THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW.  EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU.  SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. Limitation of Liability.
-
-  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
-  17. Interpretation of Sections 15 and 16.
-
-  If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
-                     END OF TERMS AND CONDITIONS
-
-            How to Apply These Terms to Your New Programs
-
-  If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
-  To do so, attach the following notices to the program.  It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software: you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation, either version 3 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License
-    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
-  If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
-    <program>  Copyright (C) <year>  <name of author>
-    This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
-  You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
-  The GNU General Public License does not permit incorporating your program
-into proprietary programs.  If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.  But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
 include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
 recursive-include yt/gui/reason/html *.html *.png *.ico *.js
 recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
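
The new recursive-include rule pulls the Fortran kdtree sources into
source tarballs.  A quick sanity check that the rule takes effect (a
sketch; the dist/yt-*.tar.gz name is an assumption about the package
metadata):

    # Build a source distribution and confirm the kdtree files made it in
    python setup.py sdist
    tar tzf dist/yt-*.tar.gz | grep 'utilities/kdtree'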

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 doc/get_yt.sh
--- /dev/null
+++ b/doc/get_yt.sh
@@ -0,0 +1,358 @@
+#
+# Hi there!  Welcome to the yt installation script.
+#
+# This script is designed to create a fully isolated Python installation
+# with the dependencies you need to run yt.
+#
+# This script is based on Conda, a distribution mechanism from Continuum
+# Analytics.  The process is as follows:
+#
+#  1. Download the appropriate Conda installation package
+#  2. Install Conda into the specified directory
+#  3. Install yt-specific dependencies
+#  4. Install yt
+#
+# There are a few options listed below, but by default, this will install
+# everything.  At the end, it will tell you what to do to use yt.
+#
+# By default this will install yt from source.
+#
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
+#
+DEST_SUFFIX="yt-conda"
+DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
+BRANCH="yt" # This is the branch to which we will forcibly update.
+INST_YT_SOURCE=1 # Do we do a source install of yt?
+
+##################################################################
+#                                                                #
+# You will likely not have to modify anything below this region. #
+#                                                                #
+##################################################################
+
+LOG_FILE="`pwd`/yt_install.log"
+
+# Here is the idiom for redirecting to the log file:
+# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
+MINICONDA_VERSION="1.9.1"
+YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
+
+function do_exit
+{
+    echo "********************************************"
+    echo "        FAILURE REPORT:"
+    echo "********************************************"
+    echo
+    tail -n 10 ${LOG_FILE}
+    echo
+    echo "********************************************"
+    echo "********************************************"
+    echo "Failure.  Check ${LOG_FILE}.  The last 10 lines are above."
+    exit 1
+}
+
+function log_cmd
+{
+    echo "EXECUTING:" >> ${LOG_FILE}
+    echo "  $*" >> ${LOG_FILE}
+    ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytproject
+{
+    [ -e $1 ] && return
+    echo "Downloading $1 from yt-project.org"
+    ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytdata
+{
+    echo "Downloading $1 from yt-project.org"
+    [ -e $1 ] && return
+    ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+    ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytrecipe {
+    RDIR=${DEST_DIR}/src/yt-recipes/$1
+    mkdir -p ${RDIR}
+    pushd ${RDIR}
+    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
+    log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
+    NEW_PKG=`conda build --output ${RDIR}`
+    log_cmd conda build --no-binstar-upload ${RDIR}
+    log_cmd conda install ${NEW_PKG}
+    popd
+}
+
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "Hi there!  This is the yt installation script.  We're going to download"
+echo "some stuff and install it to create a self-contained, isolated"
+echo "environment for yt to run within."
+echo
+echo "This will install Miniconda from Continuum Analytics and the"
+echo "necessary packages to run yt, creating a self-contained environment"
+echo "for you to use yt.  Additionally, Conda itself provides the ability"
+echo "to install many other packages that can be used for other purposes."
+echo
+MYOS=`uname -s`       # A guess at the OS
+if [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+  echo "Looks like you're running on Mac OS X."
+  echo
+  echo "NOTE: you must have the Xcode command line tools installed."
+  echo
+  echo "The instructions for obtaining these tools vary according"
+  echo "to your exact OS version.  On older versions of OS X, you"
+  echo "must register for an account on the Apple developer tools"
+  echo "website: https://developer.apple.com/downloads to obtain the"
+  echo "download link."
+  echo
+  echo "We have gathered some additional instructions for each"
+  echo "version of OS X below. If you have trouble installing yt"
+  echo "after following these instructions, don't hesitate to contact"
+  echo "the yt users' e-mail list."
+  echo
+  echo "You can see which version of OS X you are running by clicking"
+  echo "'About This Mac' in the Apple menu on the left-hand side of the"
+  echo "menu bar.  We're assuming that you've installed all operating"
+  echo "system updates; if you have an older version, we suggest"
+  echo "running software update and installing all available updates."
+  echo
+  echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+  echo "Apple developer tools website."
+  echo
+  echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+  echo "developer tools website.  You can either download the"
+  echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+  echo "Software Update to update to Xcode 3.2.6, or you can download"
+  echo "the Xcode 3.2.6/iOS SDK bundle (4.1 GB)."
+  echo
+  echo "OS X 10.7.5: download Xcode 4.2 from the Mac App Store"
+  echo "(search for Xcode)."
+  echo "Alternatively, download the Xcode command line tools from"
+  echo "the Apple developer tools website."
+  echo
+  echo "OS X 10.8.2: download Xcode 4.6.1 from the Mac App Store"
+  echo "(search for Xcode)."
+  echo "Additionally, you will have to manually install the Xcode"
+  echo "command line tools, see:"
+  echo "http://stackoverflow.com/questions/9353444"
+  echo "Alternatively, download the Xcode command line tools from"
+  echo "the Apple developer tools website."
+  echo
+  echo "NOTE: It's possible that the installation will fail.  If so,"
+  echo "please set the following environment variables, remove any"
+  echo "broken installation tree, and re-run this script verbatim."
+  echo
+  echo "$ export CC=gcc"
+  echo "$ export CXX=g++"
+  echo
+  MINICONDA_OS="MacOSX-x86_64"
+fi
+if [ "${MYOS##Linux}" != "${MYOS}" ]
+then
+  echo "Looks like you're on Linux."
+  echo
+  echo "Please make sure you have the developer tools for your OS installed."
+  echo
+  if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+  then
+    echo "Looks like you're on an OpenSUSE-compatible machine."
+    echo
+    echo "You need to have these packages installed:"
+    echo
+    echo "  * devel_C_C++"
+    echo "  * libopenssl-devel"
+    echo "  * libuuid-devel"
+    echo "  * zip"
+    echo "  * gcc-c++"
+    echo "  * chrpath"
+    echo
+    echo "You can accomplish this by executing:"
+    echo
+    echo "$ sudo zypper install -t pattern devel_C_C++"
+    echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+    echo "$ sudo zypper install chrpath"
+  fi
+  if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+  then
+    echo "Looks like you're on an Ubuntu-compatible machine."
+    echo
+    echo "You need to have these packages installed:"
+    echo
+    echo "  * libssl-dev"
+    echo "  * build-essential"
+    echo "  * libncurses5"
+    echo "  * libncurses5-dev"
+    echo "  * zip"
+    echo "  * uuid-dev"
+    echo "  * chrpath"
+    echo
+    echo "You can accomplish this by executing:"
+    echo
+    echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+    echo
+  fi
+  echo
+  echo "If you are running on a supercomputer or other module-enabled"
+  echo "system, please make sure that the GNU module has been loaded."
+  echo
+  MYARCH=`uname -m`   # The machine architecture; uname -s carries no arch info
+  if [ "${MYARCH}" = "x86_64" ]
+  then
+    MINICONDA_OS="Linux-x86_64"
+  elif [ "${MYARCH}" = "i386" ] || [ "${MYARCH}" = "i686" ]
+  then
+    MINICONDA_OS="Linux-x86"
+  else
+    echo "Not sure which type of Linux you're on.  Going with x86_64."
+    MINICONDA_OS="Linux-x86_64"
+  fi
+fi
+echo
+echo "If you'd rather not continue, hit Ctrl-C."
+echo
+echo "========================================================================"
+echo
+read -p "[hit enter] "
+echo
+echo "Awesome!  Here we go."
+echo
+
+MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
+
+if type -P wget &>/dev/null
+then
+    echo "Using wget"
+    export GETFILE="wget -nv"
+else
+    echo "Using curl"
+    export GETFILE="curl -sSO"
+fi
+
+echo
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
+echo
+
+${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
+
+echo "Installing the Miniconda python environment."
+
+log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
+
+# I don't think we need OR want this anymore:
+#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+
+# This we *do* need.
+export PATH=${DEST_DIR}/bin:$PATH
+
+echo "Installing the necessary packages for yt."
+echo "This may take a while, but don't worry.  yt loves you."
+
+declare -a YT_DEPS
+YT_DEPS+=('python')
+YT_DEPS+=('distribute')
+YT_DEPS+=('libpng')
+YT_DEPS+=('freetype')
+YT_DEPS+=('hdf5')
+YT_DEPS+=('numpy')
+YT_DEPS+=('pygments')
+YT_DEPS+=('jinja2')
+YT_DEPS+=('tornado')
+YT_DEPS+=('pyzmq')
+YT_DEPS+=('ipython')
+YT_DEPS+=('sphinx')
+YT_DEPS+=('h5py')
+YT_DEPS+=('matplotlib')
+YT_DEPS+=('cython')
+
+# Set up the package channels and bring conda itself up to date
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
+log_cmd conda update --yes conda
+
+echo "Current dependencies: ${YT_DEPS[@]}"
+log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
+log_cmd conda install --yes ${YT_DEPS[@]}
+
+echo "Installing mercurial."
+get_ytrecipe mercurial
+
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+  echo "Installing yt as a package."
+  get_ytrecipe yt
+else
+  # We do a source install.
+  YT_DIR="${DEST_DIR}/src/yt-hg"
+  export PNG_DIR=${DEST_DIR}
+  export FTYPE_DIR=${DEST_DIR}
+  export HDF5_DIR=${DEST_DIR}
+  log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+  pushd ${YT_DIR}
+  echo $DEST_DIR > hdf5.cfg
+  log_cmd python setup.py develop
+  popd
+  log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate 
+  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
+  log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
+  log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+fi
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "yt and the Conda system are now installed in $DEST_DIR ."
+echo
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+  echo "You must now modify your PATH variable by prepending:"
+  echo 
+  echo "   $DEST_DIR/bin"
+  echo
+  echo "For example, if you use bash, place something like this at the end"
+  echo "of your ~/.bashrc :"
+  echo
+  echo "   export PATH=$DEST_DIR/bin:$PATH"
+else
+  echo "To run from this new installation, use the activate script for this "
+  echo "environment."
+  echo
+  echo "    $ source $DEST_DIR/bin/activate"
+  echo
+  echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
+  echo "LD_LIBRARY_PATH to match your new yt install.  If you use csh, just"
+  echo "append .csh to the above."
+fi
+echo
+echo "To get started with yt, check out the orientation:"
+echo
+echo "    http://yt-project.org/doc/orientation/"
+echo
+echo "or just activate your environment and run 'yt serve' to bring up the"
+echo "yt GUI."
+echo
+echo "For support, see the website and join the mailing list:"
+echo
+echo "    http://yt-project.org/"
+echo "    http://yt-project.org/data/      (Sample data)"
+echo "    http://yt-project.org/doc/       (Docs)"
+echo
+echo "    http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
+echo
+echo "========================================================================"
+echo
+echo "Oh, look at me, still talking when there's science to do!"
+echo "Good luck, and email the user list if you run into any problems."

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 doc/how_to_develop_yt.txt
--- a/doc/how_to_develop_yt.txt
+++ b/doc/how_to_develop_yt.txt
@@ -25,7 +25,7 @@
 Licenses
 --------
 
-All code in yt should be under the GPL-3 (preferred) or a compatible license.
+All code in yt should be under the BSD 3-clause license.
 
 How To Get The Source Code
 --------------------------

diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -10,14 +10,15 @@
 # subversion checkout of yt, you can set YT_DIR, too.  (It'll already
 # check the current directory and one up.)
 #
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at 
+# http://yt-project.org.
 #
 
 DEST_SUFFIX="yt-`uname -m`"
 DEST_DIR="`pwd`/${DEST_SUFFIX/ /}"   # Installation location
 BRANCH="yt" # This is the branch to which we will forcibly update.
 
-if [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n "${YT_DEST}" ]
 then
     DEST_DIR=${YT_DEST}
 fi
@@ -97,6 +98,48 @@
 
 LOG_FILE="${DEST_DIR}/yt_install.log"
 
+function write_config
+{
+    CONFIG_FILE=${DEST_DIR}/.yt_config
+
+    echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+    echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+    echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+    echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+    echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+    echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+    echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+    echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+    echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+    echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+    echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+    echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+    echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+    echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+    echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+    if [ ${HDF5_DIR} ]
+    then
+        echo "HDF5_DIR=${HDF5_DIR}" >> ${CONFIG_FILE}
+    fi
+    if [ ${NUMPY_ARGS} ]
+    then
+        echo "NUMPY_ARGS=\"${NUMPY_ARGS}\"" >> ${CONFIG_FILE}
+    fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z "${REINST_YT}" ] || [ "${REINST_YT}" -ne 1 ]
+then
+    write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+    USED_CONFIG=1
+    source ${CONFIG_FILE}
+fi
+
 function get_willwont
 {
     if [ $1 -eq 1 ]
@@ -375,6 +418,10 @@
 get_willwont ${INST_0MQ}
 echo "be installing ZeroMQ"
 
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_ROCKSTAR}
+echo "be installing Rockstar"
+
 echo
 
 if [ -z "$HDF5_DIR" ]
@@ -396,6 +443,12 @@
 echo "hit Ctrl-C."
 echo
 host_specific
+if [ ${USED_CONFIG} ]
+then
+    echo "Settings were loaded from ${CONFIG_FILE}."
+    echo "Remove this file if you wish to return to the default settings."
+    echo
+fi
 echo "========================================================================"
 echo
 read -p "[hit enter] "
@@ -420,11 +473,18 @@
 function do_setup_py
 {
     [ -e $1/done ] && return
-    echo "Installing $1 (arguments: '$*')"
-    [ ! -e $1/extracted ] && tar xfz $1.tar.gz
-    touch $1/extracted
-    cd $1
-    if [ ! -z `echo $1 | grep h5py` ]
+    LIB=$1
+    shift
+    if [ -z "$*" ]
+    then
+        echo "Installing $LIB"
+    else
+        echo "Installing $LIB (arguments: '$@')"
+    fi
+    [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+    touch $LIB/extracted
+    cd $LIB
+    if [ ! -z `echo $LIB | grep h5py` ]
     then
         shift
 	( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -466,8 +526,8 @@
 
 function get_ytproject
 {
+    [ -e $1 ] && return
     echo "Downloading $1 from yt-project.org"
-    [ -e $1 ] && return
     ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
     ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
 }
@@ -498,74 +558,100 @@
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
 # Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1  Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0  Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
 echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1  PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299  Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f  Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
 echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12  bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
 echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6  reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3  freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3  h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a  hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554  libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208  matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2  mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8  numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd  sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d  zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa  ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865  zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83  pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1  tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220  Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd  nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397  python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a  sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce  freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202  h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1  hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56  ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586  libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a  matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97  mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4  nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684  numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68  python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b  pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
 echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5  rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa  scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e  blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952  lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4  scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4  sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8  sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf  tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca  zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a  zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
 # Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
 [ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
 get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
 if [ $INST_BZLIB -eq 1 ]
 then
-    if [ ! -e bzip2-1.0.6/done ]
+    if [ ! -e $BZLIB/done ]
     then
-        [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+        [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
         echo "Installing BZLIB"
-        cd bzip2-1.0.6
+        cd $BZLIB
         if [ `uname` = "Darwin" ]
         then
             if [ -z "${CC}" ]
             then
                 sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
             else
-                sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
+                sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
             fi
         fi
         ( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -581,11 +667,11 @@
 
 if [ $INST_ZLIB -eq 1 ]
 then
-    if [ ! -e zlib-1.2.7/done ]
+    if [ ! -e $ZLIB/done ]
     then
-        [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+        [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
         echo "Installing ZLIB"
-        cd zlib-1.2.7
+        cd $ZLIB
         ( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -599,11 +685,11 @@
 
 if [ $INST_PNG -eq 1 ]
 then
-    if [ ! -e libpng-1.6.1/done ]
+    if [ ! -e $PNG/done ]
     then
-        [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+        [ ! -e $PNG ] && tar xfz $PNG.tar.gz
         echo "Installing PNG"
-        cd libpng-1.6.1
+        cd $PNG
         ( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -617,13 +703,14 @@
 
 if [ $INST_FTYPE -eq 1 ]
 then
-    if [ ! -e freetype-2.4.11/done ]
+    if [ ! -e $FREETYPE_VER/done ]
     then
-        [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+        [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
         echo "Installing FreeType2"
-        cd freetype-2.4.11
+        cd $FREETYPE_VER
         ( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
-        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+        ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
@@ -635,11 +722,11 @@
 
 if [ -z "$HDF5_DIR" ]
 then
-    if [ ! -e hdf5-1.8.9/done ]
+    if [ ! -e $HDF5/done ]
     then
-        [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+        [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
         echo "Installing HDF5"
-        cd hdf5-1.8.9
+        cd $HDF5
         ( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -654,11 +741,11 @@
 
 if [ $INST_SQLITE3 -eq 1 ]
 then
-    if [ ! -e sqlite-autoconf-3071601/done ]
+    if [ ! -e $SQLITE/done ]
     then
-        [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+        [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
         echo "Installing SQLite3"
-        cd sqlite-autoconf-3071601
+        cd $SQLITE
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -667,11 +754,11 @@
     fi
 fi
 
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
 then
     echo "Installing Python.  This may take a while, but don't worry.  yt loves you."
-    [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
-    cd Python-2.7.4
+    [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+    cd $PYTHON
     ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
 
     ( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -686,7 +773,7 @@
 
 if [ $INST_HG -eq 1 ]
 then
-    do_setup_py mercurial-2.5.4
+    do_setup_py $MERCURIAL
     export HG_EXEC=${DEST_DIR}/bin/hg
 else
     # We assume that hg can be found in the path.
@@ -735,9 +822,9 @@
 
 if [ $INST_SCIPY -eq 0 ]
 then
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+    do_setup_py $NUMPY ${NUMPY_ARGS}
 else
-    if [ ! -e scipy-0.11.0/done ]
+    if [ ! -e $SCIPY/done ]
     then
 	if [ ! -e BLAS/done ]
 	then
@@ -745,27 +832,27 @@
 	    echo "Building BLAS"
 	    cd BLAS
 	    gfortran -O2 -fPIC -fno-second-underscore -c *.f
-	    ar r libfblas.a *.o 1>> ${LOG_FILE}
-	    ranlib libfblas.a 1>> ${LOG_FILE}
+	    ( ar r libfblas.a *.o 2>&1 ) 1>> ${LOG_FILE}
+	    ( ranlib libfblas.a 2>&1 ) 1>> ${LOG_FILE}
 	    rm -rf *.o
 	    touch done
 	    cd ..
 	fi
-	if [ ! -e lapack-3.4.2/done ]
+	if [ ! -e $LAPACK/done ]
 	then
-	    tar xfz lapack-3.4.2.tar.gz
+	    tar xfz $LAPACK.tar.gz
 	    echo "Building LAPACK"
-	    cd lapack-3.4.2/
+	    cd $LAPACK/
 	    cp INSTALL/make.inc.gfortran make.inc
-	    make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+	    ( make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 2>&1 ) 1>> ${LOG_FILE} || do_exit
 	    touch done
 	    cd ..
 	fi
     fi
     export BLAS=$PWD/BLAS/libfblas.a
-    export LAPACK=$PWD/lapack-3.4.2/liblapack.a
-    do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
-    do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+    export LAPACK=$PWD/$LAPACK/liblapack.a
+    do_setup_py $NUMPY ${NUMPY_ARGS}
+    do_setup_py $SCIPY ${NUMPY_ARGS}
 fi
 
 if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -787,10 +874,15 @@
     echo "Setting CFLAGS ${CFLAGS}"
 fi
 # Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+if [ `uname` = "Darwin" ]
+then
+   echo "[gui_support]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+   echo "macosx = False" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+fi
+do_setup_py $MATPLOTLIB
 if [ -n "${OLD_LDFLAGS}" ]
 then
     export LDFLAG=${OLD_LDFLAGS}
@@ -802,36 +894,36 @@
 # Now we do our IPython installation, which has two optional dependencies.
 if [ $INST_0MQ -eq 1 ]
 then
-    if [ ! -e zeromq-3.2.2/done ]
+    if [ ! -e $ZEROMQ/done ]
     then
-        [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+        [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
         echo "Installing ZeroMQ"
-        cd zeromq-3.2.2
+        cd $ZEROMQ
         ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
         ( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
         touch done
         cd ..
     fi
-    do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
-    do_setup_py tornado-3.0
+    do_setup_py $PYZMQ --zmq=${DEST_DIR}
+    do_setup_py $TORNADO
 fi
 
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
 
 # Now we build Rockstar and set its environment variable.
 if [ $INST_ROCKSTAR -eq 1 ]
 then
     if [ ! -e Rockstar/done ]
     then
-        [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+        [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
         echo "Building Rockstar"
         cd Rockstar
         ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -856,10 +948,10 @@
 touch done
 cd $MY_PWD
 
-if !(${DEST_DIR}/bin/python2.7 -c "import readline" >> ${LOG_FILE})
+if !( ( ${DEST_DIR}/bin/python2.7 -c "import readline" 2>&1 )>> ${LOG_FILE})
 then
     echo "Installing pure-python readline"
-    ${DEST_DIR}/bin/pip install readline 1>> ${LOG_FILE}
+    ( ${DEST_DIR}/bin/pip install readline 2>&1 ) 1>> ${LOG_FILE}
 fi
 
 if [ $INST_ENZO -eq 1 ]

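For reference, the pattern applied throughout the install-script hunks above is twofold: the hard-coded tarball names (hdf5-1.8.9, Python-2.7.4, and so on) are hoisted into variables such as $HDF5 and $PYTHON so a version bump touches one line, and every build command is wrapped as ( cmd 2>&1 ) 1>> ${LOG_FILE} || do_exit so stderr lands in the log too. A minimal Python sketch of that log-and-abort idiom, with LOG_FILE and do_exit as illustrative stand-ins rather than anything from the script itself:

    import subprocess
    import sys

    LOG_FILE = "yt_install.log"

    def do_exit():
        sys.stderr.write("Failure.  Check %s.\n" % LOG_FILE)
        sys.exit(1)

    def logged_call(args):
        # Append stdout and stderr to the log, as `( cmd 2>&1 ) 1>>` does,
        # and abort on a nonzero exit status, as `|| do_exit` does.
        with open(LOG_FILE, "a") as log:
            if subprocess.call(args, stdout=log, stderr=subprocess.STDOUT):
                do_exit()

    # e.g. logged_call(["./configure", "--prefix=/usr/local/"])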
diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
 #!python
 import os, re
-from distutils import version
+from distutils.version import LooseVersion
 from yt.mods import *
 from yt.data_objects.data_containers import AMRData
 namespace = locals().copy()
@@ -23,10 +23,12 @@
     code.interact(doc, None, namespace)
     sys.exit()
 
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
     api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+    api_version = '0.11'
 else:
-    api_version = '0.11'
+    api_version = '1.0'
 
 if api_version == "0.10" and "DISPLAY" in os.environ:
     from matplotlib import rcParams
@@ -42,13 +44,18 @@
         ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
 elif api_version == "0.10":
     ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
-    from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+    if api_version == "0.11":
+        from IPython.frontend.terminal.interactiveshell import \
+            TerminalInteractiveShell
+    elif api_version == "1.0":
+        from IPython.terminal.interactiveshell import TerminalInteractiveShell
+    else:
+        raise RuntimeError
     ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
                     display_banner = True)
     if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
-    raise RuntimeError
+
 
 # The rest is a modified version of the IPython default profile code
 
@@ -77,7 +84,7 @@
     ip = ip_shell.IP.getapi()
     try_next = IPython.ipapi.TryNext
     kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
     ip = ip_shell
     try_next = IPython.core.error.TryNext
     kwargs = dict()

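The restructured version check in iyt above is worth pulling out: plain string comparison would order "0.10" before "0.9", so the script leans on distutils' LooseVersion to bucket the installed IPython into one of three API generations. A standalone sketch of that bucketing, with the same cutoffs as the diff:

    from distutils.version import LooseVersion

    def ipython_api_version(version_string):
        # 0.10 and older use IPython.Shell; 0.11 through 1.0 import from
        # IPython.frontend; anything newer imports from IPython.terminal.
        v = LooseVersion(version_string)
        if v <= LooseVersion('0.10'):
            return '0.10'
        elif v <= LooseVersion('1.0'):
            return '0.11'
        return '1.0'

    assert ipython_api_version('0.10') == '0.10'
    assert ipython_api_version('0.13.1') == '0.11'
    assert ipython_api_version('1.1.0') == '1.0'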
diff -r 6cb70d0e1b7fdcc07489f028bc949471bb8726d8 -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
 import subprocess
 import shutil
 import glob
-import distribute_setup
-distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+    import distribute_setup
+    distribute_setup.use_setuptools()
 
 from distutils.command.build_py import build_py
 from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
 # End snippet
 ######
 
-import setuptools
-
 VERSION = "2.6dev"
 
 if os.path.exists('MANIFEST'):
@@ -247,7 +248,7 @@
         classifiers=["Development Status :: 5 - Production/Stable",
                      "Environment :: Console",
                      "Intended Audience :: Science/Research",
-                     "License :: OSI Approved :: GNU General Public License (GPL)",
+                     "License :: OSI Approved :: BSD License",
                      "Operating System :: MacOS :: MacOS X",
                      "Operating System :: POSIX :: AIX",
                      "Operating System :: POSIX :: Linux",
@@ -268,7 +269,7 @@
         author="Matthew J. Turk",
         author_email="matthewturk at gmail.com",
         url="http://yt-project.org/",
-        license="GPL-3",
+        license="BSD",
         configuration=configuration,
         zip_safe=False,
         data_files=REASON_FILES,

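The setup.py hunk above replaces an unconditional distribute_setup bootstrap with a guarded one: distribute was merged back into setuptools at 0.7, so the shim is only needed for older installs. A hedged sketch of the same check as a predicate; the try/except is an addition not present in the diff, for version strings StrictVersion cannot parse:

    from distutils.version import StrictVersion

    def needs_distribute_bootstrap(version_string):
        # distribute merged back into setuptools at 0.7.0; older
        # installs still need the distribute_setup shim.
        try:
            return StrictVersion(version_string) < StrictVersion('0.7.0')
        except ValueError:
            # StrictVersion rejects nonstandard tags; assume modern.
            return False

    assert needs_distribute_bootstrap('0.6.49')
    assert not needs_distribute_bootstrap('0.9.8')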
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/857371033baf/
Changeset:   857371033baf
Branch:      yt
User:        MatthewTurk
Date:        2013-09-25 20:33:42
Summary:     Merging from Andrew's changes
Affected #:  3 files

diff -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 -r 857371033bafd4cbde09f884f6c775de81583f51 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -316,7 +316,7 @@
         self.fparam_filename = self._localize_check(fparam_filename)
         self.storage_filename = storage_filename
 
-        StaticOutput.__init__(self, output_dir, data_style='boxlib_native')
+        StaticOutput.__init__(self, output_dir, data_style)
 
         # These are still used in a few places.
         self.parameters["HydroMethod"] = 'boxlib'
@@ -524,34 +524,13 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
-class OrionStaticOutput(BoxlibStaticOutput):
+class OrionHierarchy(BoxlibHierarchy):
     
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        output_dir = args[0]
-        header_filename = os.path.join(output_dir, "Header")
-        jobinfo_filename = os.path.join(output_dir, "job_info")
-        if not os.path.exists(header_filename):
-            # We *know* it's not boxlib if Header doesn't exist.
-            return False
-        args = inspect.getcallargs(cls.__init__, args, kwargs)
-        # This might need to be localized somehow
-        inputs_filename = os.path.join(
-                            os.path.dirname(os.path.abspath(output_dir)),
-                            args['cparam_filename'])
-        if not os.path.exists(inputs_filename):
-            return False
-        if os.path.exists(jobinfo_filename):
-            return False
-        # Now we check for all the others
-        lines = open(inputs_filename).readlines()
-        if any(("castro." in line for line in lines)): return False
-        if any(("nyx." in line for line in lines)): return False
-        if any(("geometry.prob_lo" in line for line in lines)): return True
-        return False
+    def __init__(self, pf, data_style='orion_native'):
+        BoxlibHierarchy.__init__(self, pf, data_style)
+        self._read_particles()
+        #self.io = IOHandlerOrion
 
-class OrionHierarchy(BoxlibHierarchy):
     def _read_particles(self):
         """
         reads in particles and assigns them to grids. Will search for
@@ -598,6 +577,43 @@
                     self.grids[ind].NumberOfParticles += 1
         return True
                 
+class OrionStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = OrionHierarchy
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='orion_native',
+                 storage_filename = None):
+
+        BoxlibStaticOutput.__init__(self, output_dir,
+                 cparam_filename, fparam_filename, data_style)
+          
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args                                                                               
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.                                      
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow                                                     
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others                                                             
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
 
 class CastroStaticOutput(BoxlibStaticOutput):
 

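The reshuffled _is_valid above encodes a small decision tree for telling an Orion plotfile apart from its Boxlib siblings: Header must exist, job_info must not (Castro and Maestro write one), an inputs file must sit next to the output, and that file must mention geometry.prob_lo without any castro. or nyx. parameters. A hedged standalone sketch of the same logic as a plain function:

    import os

    def looks_like_orion(output_dir, cparam_filename="inputs"):
        # No Header: not a Boxlib output of any flavor.
        if not os.path.exists(os.path.join(output_dir, "Header")):
            return False
        # job_info is written by Castro/Maestro; defer to those frontends.
        if os.path.exists(os.path.join(output_dir, "job_info")):
            return False
        inputs = os.path.join(os.path.dirname(os.path.abspath(output_dir)),
                              cparam_filename)
        if not os.path.exists(inputs):
            return False
        lines = open(inputs).readlines()
        if any("castro." in line for line in lines):
            return False
        if any("nyx." in line for line in lines):
            return False
        return any("geometry.prob_lo" in line for line in lines)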
diff -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 -r 857371033bafd4cbde09f884f6c775de81583f51 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -74,7 +74,12 @@
                  'particle_angmomen_x': 7,
                  'particle_angmomen_y': 8,
                  'particle_angmomen_z': 9,
-                 'particle_id': -1}
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
 
         if len(line.strip().split()) == 11:
             # these are vanilla sinks, do nothing

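The widened index map above gives every known sink-particle column a fixed position, with particle_id moving from the sentinel -1 to column 15. In use, a field is read back by splitting a whitespace-delimited line and indexing into it; a toy illustration with a made-up row:

    index = {'particle_mass': 0,
             'particle_position_x': 1,
             'particle_position_y': 2,
             'particle_position_z': 3,
             'particle_id': 15}

    def read(line, field):
        # Mirrors the reader in io.py: split the row, pick the column.
        return float(line.strip().split(' ')[index[field]])

    row = ' '.join(str(float(i)) for i in range(16))  # stand-in data row
    assert read(row, 'particle_position_x') == 1.0
    assert read(row, 'particle_id') == 15.0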
diff -r 02f36f34a52560377c06f8ab5e4da56a4d49f4e7 -r 857371033bafd4cbde09f884f6c775de81583f51 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -64,14 +64,14 @@
     EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
-from yt.frontends.castro.api import \
-    CastroStaticOutput, CastroFieldInfo, add_castro_field
+# from yt.frontends.castro.api import \
+#     CastroStaticOutput, CastroFieldInfo, add_castro_field
 
-from yt.frontends.nyx.api import \
-    NyxStaticOutput, NyxFieldInfo, add_nyx_field
+# from yt.frontends.nyx.api import \
+#     NyxStaticOutput, NyxFieldInfo, add_nyx_field
 
-from yt.frontends.orion.api import \
-    OrionStaticOutput, OrionFieldInfo, add_orion_field
+# from yt.frontends.orion.api import \
+#     OrionStaticOutput, OrionFieldInfo, add_orion_field
 
 from yt.frontends.flash.api import \
     FLASHStaticOutput, FLASHFieldInfo, add_flash_field


https://bitbucket.org/yt_analysis/yt-3.0/commits/4bde9f2ae227/
Changeset:   4bde9f2ae227
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-25 21:18:08
Summary:     Merging in boxlib bookmark to yt-3.0.
Affected #:  11 files

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -56,6 +56,7 @@
         self.pf = self.hierarchy.parameter_file  # weakref already
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
+        self.filename = filename
         self._last_mask = None
         self._last_count = -1
         self._last_selector_id = None

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/__init__.py
--- /dev/null
+++ b/yt/frontends/boxlib/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.orion
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/api.py
--- /dev/null
+++ b/yt/frontends/boxlib/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.frontends.orion
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      OrionGrid, \
+      OrionHierarchy, \
+      OrionStaticOutput
+
+from .fields import \
+      OrionFieldInfo, \
+      add_orion_field
+
+from .io import \
+      IOHandlerNative

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/data_structures.py
--- /dev/null
+++ b/yt/frontends/boxlib/data_structures.py
@@ -0,0 +1,673 @@
+"""
+Data structures for Boxlib Codes 
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import weakref
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
+import numpy as np
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.geometry.grid_geometry_handler import GridGeometryHandler
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.geometry.selection_routines import \
+    RegionSelector
+
+from .definitions import \
+    orion2enzoDict, \
+    parameterDict
+from .fields import \
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
+from .io import IOHandlerBoxlib
+# This is what we use to find scientific notation that might include d's
+# instead of e's.
+_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+# This is the dimensions in the Cell_H file for each level
+_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# This is the line that prefixes each set of data for a FAB in the FAB file
+_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
+                             r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
+                             "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
+
+
+
+class BoxlibGrid(AMRGridPatch):
+    _id_offset = 0
+
+    def __init__(self, grid_id, offset, filename = None,
+                 hierarchy = None):
+        super(BoxlibGrid, self).__init__(grid_id, filename, hierarchy)
+        self._offset = offset
+        self._parent_id = []
+        self._children_ids = []
+
+    def _prepare_grid(self):
+        super(BoxlibGrid, self)._prepare_grid()
+        my_ind = self.id - self._id_offset
+        self.start_index = self.hierarchy.grid_start_index[my_ind]
+
+    def get_global_startindex(self):
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        my_ind = self.id - self._id_offset
+        self.dds = self.hierarchy.level_dds[self.Level,:]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "BoxlibGrid_%04i" % (self.id)
+
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.hierarchy.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.hierarchy.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
+class BoxlibHierarchy(GridGeometryHandler):
+    grid = BoxlibGrid
+    def __init__(self, pf, data_style='boxlib_native'):
+        self.data_style = data_style
+        self.header_filename = os.path.join(pf.output_dir, 'Header')
+        self.directory = pf.output_dir
+
+        GridGeometryHandler.__init__(self, pf, data_style)
+        self._cache_endianness(self.grids[-1])
+        #self._read_particles()
+
+    def _parse_hierarchy(self):
+        """
+        read the global header file for a Boxlib plotfile output.
+        """
+        self.max_level = self.parameter_file._max_level
+        header_file = open(self.header_filename,'r')
+        # We can now skip to the point in the file we want to start parsing at.
+        header_file.seek(self.parameter_file._header_mesh_start)
+
+        dx = []
+        for i in range(self.max_level + 1):
+            dx.append([float(v) for v in header_file.next().split()])
+        self.level_dds = np.array(dx, dtype="float64")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("yt only supports cartesian coordinates.")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+
+        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        self.grids = []
+        grid_counter = 0
+        for level in range(self.max_level + 1):
+            vals = header_file.next().split()
+            # should this be grid_time or level_time??
+            lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+            assert(lev == level)
+            nsteps = int(header_file.next())
+            for gi in range(ngrids):
+                xlo, xhi = [float(v) for v in header_file.next().split()]
+                ylo, yhi = [float(v) for v in header_file.next().split()]
+                zlo, zhi = [float(v) for v in header_file.next().split()]
+                self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
+                self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
+            # Now we get to the level header filename, which we open and parse.
+            fn = os.path.join(self.parameter_file.output_dir,
+                              header_file.next().strip())
+            level_header_file = open(fn + "_H")
+            level_dir = os.path.dirname(fn)
+            # We skip the first two lines, although they probably contain
+            # useful information I don't know how to decipher.
+            level_header_file.next()
+            level_header_file.next()
+            # Now we get the number of data files
+            ncomp_this_file = int(level_header_file.next())
+            # Skip the next line, and we should get the number of grids / FABs
+            # in this file
+            level_header_file.next()
+            # To decipher this next line, we expect something like:
+            # (8 0
+            # where the first is the number of FABs in this level.
+            ngrids = int(level_header_file.next().split()[0][1:])
+            # Now we can iterate over each and get the indices.
+            for gi in range(ngrids):
+                # components within it
+                start, stop = _dim_finder.match(level_header_file.next()).groups()
+                start = np.array(start.split(","), dtype="int64")
+                stop = np.array(stop.split(","), dtype="int64")
+                dims = stop - start + 1
+                self.grid_dimensions[grid_counter + gi,:] = dims
+                self.grid_start_index[grid_counter + gi,:] = start
+            # Now we read two more lines.  The first of these is a close
+            # parenthesis.
+            level_header_file.next()
+            # This line I'm not 100% sure of, but it's either number of grids
+            # or number of FABfiles.
+            level_header_file.next()
+            # Now we iterate over grids to find their offsets in each file.
+            for gi in range(ngrids):
+                # Now we get the data file, at which point we're ready to
+                # create the grid.
+                dummy, filename, offset = level_header_file.next().split()
+                filename = os.path.join(level_dir, filename)
+                go = self.grid(grid_counter + gi, int(offset), filename, self)
+                go.Level = self.grid_levels[grid_counter + gi,:] = level
+                self.grids.append(go)
+            grid_counter += ngrids
+            # already read the filenames above...
+        self.float_type = 'float64'
+
+    def _cache_endianness(self,test_grid):
+        """
+        Cache the endianness and bytes per real of the grids by using a
+        test grid and assuming that all grids have the same
+        endianness. This is a pretty safe assumption since Boxlib uses
+        one file per processor, and if you're running on a cluster
+        with different endian processors, then you're on your own!
+        """
+        # open the test file & grab the header
+        with open(os.path.expanduser(test_grid.filename), 'rb') as f:
+            header = f.readline()
+        
+        bpr, endian, start, stop, centering, nc = \
+            _header_pattern.search(header).groups()
+        # Note that previously we were using a different value for BPR than we
+        # use now.  Here is an example set of information directly from BoxLib:
+        #  * DOUBLE data
+        #  * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27
+        #  * FLOAT data
+        #  * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
+        if bpr == endian[0]:
+            dtype = '<f%s' % bpr
+        elif bpr == endian[-1]:
+            dtype = '>f%s' % bpr
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        mylog.debug("FAB header suggests dtype of %s", dtype)
+        self._dtype = np.dtype(dtype)
+
+    def _populate_grid_objects(self):
+        mylog.debug("Creating grid objects")
+        self.grids = np.array(self.grids, dtype='object')
+        self._reconstruct_parent_child()
+        for i, grid in enumerate(self.grids):
+            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            grid._prepare_grid()
+            grid._setup_dx()
+        mylog.debug("Done creating grid objects")
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
+
+    def _get_grid_children(self, grid):
+        mask = np.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
+        return np.where(mask)
+
+    def _count_grids(self):
+        # We can get everything from the Header file, but note that we're
+        # duplicating some work done elsewhere.  In a future where we don't
+        # pre-allocate grid arrays, this becomes unnecessary.
+        header_file = open(self.header_filename, 'r')
+        header_file.seek(self.parameter_file._header_mesh_start)
+        # Skip over the level dxs, geometry and the zero:
+        [header_file.next() for i in range(self.parameter_file._max_level + 3)]
+        # Now we need to be very careful, as we've seeked, and now we iterate.
+        # Does this work?  We are going to count the number of places that we
+        # have a three-item line.  The three items would be level, number of
+        # grids, and then grid time.
+        self.num_grids = 0
+        for line in header_file:
+            if len(line.split()) != 3: continue
+            self.num_grids += int(line.split()[1])
+        
+    def _initialize_grid_arrays(self):
+        super(BoxlibHierarchy, self)._initialize_grid_arrays()
+        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+
+    def _initialize_state_variables(self):
+        """override to not re-initialize num_grids in AMRHierarchy.__init__
+
+        """
+        self._parallel_locking = False
+        self._data_file = None
+        self._data_mode = None
+        self._max_locations = {}
+
+    def _detect_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = self.parameter_file._field_list[:]
+        self.field_indexes = dict((f, i)
+                                for i, f in enumerate(self.field_list))
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+class BoxlibStaticOutput(StaticOutput):
+    """
+    This class is a stripped down class that simply reads and parses
+    *filename*, without looking at the Boxlib hierarchy.
+    """
+    _hierarchy_class = BoxlibHierarchy
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
+    _output_prefix = None
+
+    # THIS SHOULD BE FIXED:
+    periodicity = (True, True, True)
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='boxlib_native',
+                 storage_filename = None):
+        """
+        The paramfile is usually called "inputs", and there may be a
+        Fortran inputs file, usually called "probin".  The plotname here
+        will be a directory name.  As per BoxLib, data_style will be
+        Native (implemented here), IEEE (not yet implemented), or ASCII
+        (not yet implemented).
+        """
+        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
+        self.cparam_filename = self._localize_check(cparam_filename)
+        self.fparam_filename = self._localize_check(fparam_filename)
+        self.storage_filename = storage_filename
+
+        StaticOutput.__init__(self, output_dir, data_style)
+
+        # These are still used in a few places.
+        self.parameters["HydroMethod"] = 'boxlib'
+        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["EOSType"] = -1 # default
+
+    def _localize_check(self, fn):
+        # If the file exists, use it.  If not, set it to None.
+        root_dir = os.path.dirname(self.output_dir)
+        full_fn = os.path.join(root_dir, fn)
+        if os.path.exists(full_fn):
+            return full_fn
+        return None
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename) and \
+           not os.path.exists(jobinfo_filename):
+            return True # We have no parameters to go off of
+        # If we do have either inputs or jobinfo, we should be deferring to a
+        # different frontend.
+        return False
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+        self._parse_header_file()
+        # Let's read the file
+        hfn = os.path.join(self.output_dir, 'Header')
+        self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
+        # the 'inputs' file is now optional
+        self._parse_cparams()
+        self._parse_fparams()
+
+    def _parse_cparams(self):
+        if self.cparam_filename is None:
+            return
+        for line in (line.split("#")[0].strip() for line in
+                     open(self.cparam_filename)):
+            if len(line) == 0: continue
+            param, vals = [s.strip() for s in line.split("=")]
+            if param == "amr.n_cell":
+                vals = self.domain_dimensions = np.array(vals.split(), dtype='int32')
+            elif param == "amr.ref_ratio":
+                vals = self.refine_by = int(vals[0])
+            elif param == "Prob.lo_bc":
+                vals = self.periodicity = ensure_tuple([i == 0 for i in vals.split()])
+            elif param == "castro.use_comoving":
+                vals = self.cosmological_simulation = int(vals)
+            else:
+                # Now we guess some things about the parameter and its type
+                v = vals.split()[0] # Just in case there are multiple; we'll go
+                                    # back afterward to using vals.
+                try:
+                    float(v.upper().replace("D","E"))
+                except:
+                    pcast = str
+                else:
+                    syms = (".", "D+", "D-", "E+", "E-")
+                    if any(sym in v.upper() for sym in syms for v in vals.split()):
+                        pcast = float
+                    else:
+                        pcast = int
+                vals = [pcast(v) for v in vals.split()]
+                if len(vals) == 1: vals = vals[0]
+            self.parameters[param] = vals
+
+        if getattr(self, "cosmological_simulation", 0) == 1:
+            self.omega_lambda = self.parameters["comoving_OmL"]
+            self.omega_matter = self.parameters["comoving_OmM"]
+            self.hubble_constant = self.parameters["comoving_h"]
+            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            line = a_file.readline().strip()
+            a_file.close()
+            self.current_redshift = 1/float(line) - 1
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def _parse_fparams(self):
+        """
+        Parses the Fortran parameter file for Orion.  Most of it is not
+        needed, but this is where mu, the mean mass per particle in units
+        of m_hydrogen, is kept.
+        """
+        if self.fparam_filename is None:
+            return
+        for line in (l for l in open(self.fparam_filename) if "=" in l):
+            param, vals = [v.strip() for v in line.split("=")]
+            # Now, there are a couple different types of parameters.
+            # Some will be where you only have floating point values, others
+            # will be where things are specified as string literals.
+            # Unfortunately, we're also using Fortran values, which will have
+            # things like 1.d-2 which is pathologically difficult to parse if
+            # your C library doesn't include 'd' in its locale for strtod.
+            # So we'll try to determine this.
+            vals = vals.split()
+            if any(_scinot_finder.match(v) for v in vals):
+                vals = [float(v.replace("D","e").replace("d","e"))
+                        for v in vals]
+            if len(vals) == 1:
+                vals = vals[0]
+            self.parameters[param] = vals
+
+    def _parse_header_file(self):
+        """
+        We parse the Boxlib header, which we use as our basis.  Anything in the
+        inputs file will override this, but the inputs file is not strictly
+        necessary for orientation of the data in space.
+        """
+
+        # Note: Python uses a read-ahead buffer, so using .next(), which would
+        # be my preferred solution, won't work here.  We have to explicitly
+        # call readline() if we want to end up with an offset at the very end.
+        # Fortunately, elsewhere we don't care about the offset, so we're fine
+        # everywhere else using iteration exclusively.
+        header_file = open(os.path.join(self.output_dir,'Header'))
+        self.orion_version = header_file.readline().rstrip()
+        n_fields = int(header_file.readline())
+
+        self._field_list = [header_file.readline().strip()
+                           for i in range(n_fields)]
+
+        self.dimensionality = int(header_file.readline())
+        if self.dimensionality != 3:
+            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+        self.current_time = float(header_file.readline())
+        # This is traditionally a hierarchy attribute, so we will set it, but
+        # in a slightly hidden variable.
+        self._max_level = int(header_file.readline()) 
+        self.domain_left_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        self.domain_right_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        ref_factors = np.array([int(i) for i in
+                                header_file.readline().split()])
+        if ref_factors.size == 0:
+            # We use a default of two, as Nyx doesn't always output this value
+            ref_factors = [2]
+        # We can't vary refinement factors based on dimension, or whatever else
+        # they might be varied on.  One curious thing I found is that some Castro 3D
+        # data has only two refinement factors, which I don't know how to
+        # understand.
+        assert(np.unique(ref_factors).size == 1)
+        self.refine_by = ref_factors[0]
+        # Now we read the global index space, to get the domain dimensions.
+        index_space = header_file.readline()
+        # This will be of the form:
+        #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
+        # So note that if we split it all up based on spaces, we should be
+        # fine, as long as we take the first two entries, which correspond to
+        # the root level.  I'm not 100% pleased with this solution.
+        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        start = np.array(root_space[0].split(","), dtype="int64")
+        stop = np.array(root_space[1].split(","), dtype="int64")
+        self.domain_dimensions = stop - start + 1
+        # Skip timesteps per level
+        header_file.readline()
+        self._header_mesh_start = header_file.tell()
+
+    def _set_units(self):
+        """
+        Generates the conversions to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+class OrionHierarchy(BoxlibHierarchy):
+    
+    def __init__(self, pf, data_style='orion_native'):
+        BoxlibHierarchy.__init__(self, pf, data_style)
+        self._read_particles()
+        #self.io = IOHandlerOrion
+
+    def _read_particles(self):
+        """
+        reads in particles and assigns them to grids. Will search for
+        Star particles and sink particles, and will simply note that no
+        particles are found if neither file exists. To add a new Orion
+        particle type, simply add its filename to the list this method
+        searches.
+
+        """
+        self.grid_particle_count = np.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.output_dir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip()[0])
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+        return True
+                
+class OrionStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = OrionHierarchy
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='orion_native',
+                 storage_filename = None):
+
+        BoxlibStaticOutput.__init__(self, output_dir,
+                 cparam_filename, fparam_filename, data_style)
+          
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args                                                                               
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.                                      
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow                                                     
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others                                                             
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
+
+class CastroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+class MaestroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if not os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        # Maestro outputs have "Castro" in them
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+

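One step in _parse_header_file above deserves a worked example: the global index space line is stripped of parentheses and split on whitespace, and only the first two triples (the root level) are kept to recover the domain dimensions. Using the example line quoted in the code's own comments:

    import numpy as np

    index_space = "((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))"
    root_space = index_space.replace("(", "").replace(")", "").split()[:2]
    start = np.array(root_space[0].split(","), dtype="int64")
    stop = np.array(root_space[1].split(","), dtype="int64")
    assert list(stop - start + 1) == [256, 256, 256]   # root grid dimensions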
diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/definitions.py
--- /dev/null
+++ b/yt/frontends/boxlib/definitions.py
@@ -0,0 +1,62 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.funcs import *
+
+# TODO: get rid of enzo parameters we do not need
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                }
+
+
+# converts the Orion inputs file name to the Enzo/yt name expected
+# throughout the code. key is Orion name, value is Enzo/yt equivalent
+orion2enzoDict = {"amr.n_cell": "TopGridDimensions",
+                  "materials.gamma": "Gamma",
+                  "amr.ref_ratio": "RefineBy",
+                  "castro.use_comoving": "ComovingCoordinates",
+                  "castro.redshift_in": "CosmologyInitialRedshift",
+                  "comoving_OmL": "CosmologyOmegaLambdaNow",
+                  "comoving_OmM": "CosmologyOmegaMatterNow",
+                  "comoving_h": "CosmologyHubbleConstantNow"
+                  }
+

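The two dictionaries above work as a pair: orion2enzoDict maps an Orion inputs key to the Enzo/yt parameter name used throughout the code, and parameterDict supplies the type to cast the raw string to. A minimal sketch of how they might be combined; translate_param is an illustrative helper, not a function in this module:

    from yt.frontends.boxlib.definitions import orion2enzoDict, parameterDict

    def translate_param(orion_key, raw_value):
        # Rename, then cast; fall back to str for untyped parameters.
        enzo_key = orion2enzoDict[orion_key]
        cast = parameterDict.get(enzo_key, str)
        return enzo_key, cast(raw_value)

    # e.g. translate_param("materials.gamma", "1.6667") -> ("Gamma", 1.6667)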
diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/fields.py
--- /dev/null
+++ b/yt/frontends/boxlib/fields.py
@@ -0,0 +1,183 @@
+"""
+Orion-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.physical_constants import \
+    mh, kboltz
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+
+
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
+
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
+
+add_orion_field("density", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_orion_field("eden", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
+
+add_orion_field("xmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+add_orion_field("ymom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
+
+add_orion_field("zmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+translation_dict = {"x-velocity": "xvel",
+                    "y-velocity": "yvel",
+                    "z-velocity": "zvel",
+                    "Density": "density",
+                    "TotalEnergy": "eden",
+                    "Temperature": "temperature",
+                    "x-momentum": "xmom",
+                    "y-momentum": "ymom",
+                    "z-momentum": "zmom"
+                   }
+
+for f,v in translation_dict.items():
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)])
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units = ff._units, display_name=f)
+
+def _xVelocity(field, data):
+    """generate x-velocity from x-momentum and density
+    
+    """
+    return data["xmom"]/data["density"]
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _yVelocity(field,data):
+    """generate y-velocity from y-momentum and density
+
+    """
+    #try:
+    #    return data["xvel"]
+    #except KeyError:
+    return data["ymom"]/data["density"]
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _zVelocity(field,data):
+    """generate z-velocity from z-momentum and density
+    
+    """
+    return data["zmom"]/data["density"]
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _ThermalEnergy(field, data):
+    """generate thermal (gas energy). Dual Energy Formalism was
+        implemented by Stella, but this isn't how it's called, so I'll
+        leave that commented out for now.
+    """
+    #if data.pf["DualEnergyFormalism"]:
+    #    return data["GasEnergy"]
+    #else:
+    return data["TotalEnergy"] - 0.5 * data["density"] * (
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
+add_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
+
+def _Pressure(field,data):
+    """M{(Gamma-1.0)*e, where e is thermal energy density
+       NB: this will need to be modified for radiation
+    """
+    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+
+def _Temperature(field,data):
+    return (data.pf["Gamma"]-1.0)*data.pf["mu"]*mh*data["ThermalEnergy"]/(kboltz*data["Density"])
+add_field("Temperature",function=_Temperature,units=r"\rm{Kelvin}",take_log=False)
+
+# particle fields
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+
+    return _Particles
+
+_particle_field_list = ["mass", 
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "r",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "luminosity",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

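The derived fields above form a short chain: velocities are momenta divided by density, ThermalEnergy subtracts the kinetic term from the total energy density, and Pressure applies (Gamma - 1). A standalone numpy rendition of that chain on made-up values, with gamma assumed to be 5/3:

    import numpy as np

    gamma = 5.0 / 3.0
    density = np.array([1.0e-20])     # g/cm^3
    xmom = np.array([2.0e-15])        # g/(cm^2 s)
    eden = np.array([5.0e-10])        # erg/cm^3

    xvel = xmom / density                           # cm/s, as in _xVelocity
    thermal = eden - 0.5 * density * xvel**2.0      # _ThermalEnergy, 1D here
    pressure = (gamma - 1.0) * thermal              # _Pressure

    assert abs(pressure[0] - 2.0e-10) < 1.0e-24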
diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/io.py
--- /dev/null
+++ b/yt/frontends/boxlib/io.py
@@ -0,0 +1,141 @@
+"""
+Orion data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+from yt.funcs import mylog  # used by the particle-format warnings below
+from yt.utilities.lib import read_castro_particles, read_and_seek
+from yt.utilities.io_handler import \
+           BaseIOHandler
+
+class IOHandlerBoxlib(BaseIOHandler):
+
+    _data_style = "boxlib_native"
+
+    def modify(self, field):
+        return field.swapaxes(0,2)
+
+    def _read_data(self, grid, field):
+        """
+        reads packed multiFABs output by BoxLib in "NATIVE" format.
+
+        """
+
+        filen = os.path.expanduser(grid.filename)
+        offset1 = grid._offset
+        dtype = grid.hierarchy._dtype
+        bpr = grid.hierarchy._dtype.itemsize
+        ne = grid.ActiveDimensions.prod()
+        field_index = grid.hierarchy.field_indexes[field]
+        offset2 = int(ne*bpr*field_index)
+
+        field = np.empty(grid.ActiveDimensions, dtype=dtype, order='F')
+        read_and_seek(filen, offset1, offset2, field, ne * bpr)
+
+        return field
+
+class IOHandlerOrion(IOHandlerBoxlib):
+    _data_style = "orion_native"
+
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+          index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+          index['particle_mdot']      = 14
+          index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # warn if none of the known column counts apply
+            # NOTE: mylog is not imported in this file yet; a later
+            # changeset adds "from yt.funcs import mylog".
+            mylog.warning('Could not determine the particle output file format.')
+            mylog.warning('These results could be nonsense!')
+
+        def read(line, field):
+            # split() (not split(' ')) tolerates runs of whitespace,
+            # matching the column counting done above
+            return float(line.strip().split()[index[field]])
+
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)
+
+class IOHandlerCastro(IOHandlerBoxlib):
+    _data_style = "castro_native"
+
+    def _read_particle_field(self, grid, field):
+        # castro_particle_field_names is assumed to be imported from the
+        # Castro frontend's definitions; it is not imported in this file.
+        filen = os.path.expanduser(grid.particle_filename)
+        off = grid._particle_offset
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+            castro_particle_field_names.index(field),
+            len(castro_particle_field_names),
+            tr)
+        return tr
+

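To make the column bookkeeping above concrete, a sketch of how read() maps one whitespace-separated StarParticles line to a field value; the line contents are invented:

    # invented 11-column "vanilla sink" line; column 0 is the mass
    line = "1.989e33 0.5 0.5 0.5 0.0 0.0 0.0 0.0 0.0 0.0 1\n"
    index = {'particle_mass': 0, 'particle_position_x': 1}       # subset of the dict above
    mass = float(line.strip().split()[index['particle_mass']])   # -> 1.989e33
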
diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/boxlib/setup.py
--- /dev/null
+++ b/yt/frontends/boxlib/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('boxlib', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ b/yt/frontends/castro/io.py
@@ -31,17 +31,6 @@
     def modify(self, field):
         return field.swapaxes(0,2)
 
-    def _read_particle_field(self, grid, field):
-        offset = grid._particle_offset
-        filen = os.path.expanduser(grid.particle_filename)
-        off = grid._particle_offset
-        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
-        read_castro_particles(filen, off,
-            castro_particle_field_names.index(field),
-            len(castro_particle_field_names),
-            tr)
-        return tr
-
     def _read_data(self, grid, field):
         """
         reads packed multiFABs output by BoxLib in "NATIVE" format.

diff -r 08d537fcc7133a7203c2d418218bc928c13ae08b -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -65,14 +65,14 @@
     EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
-from yt.frontends.castro.api import \
-    CastroStaticOutput, CastroFieldInfo, add_castro_field
+# from yt.frontends.castro.api import \
+#     CastroStaticOutput, CastroFieldInfo, add_castro_field
 
-from yt.frontends.nyx.api import \
-    NyxStaticOutput, NyxFieldInfo, add_nyx_field
+# from yt.frontends.nyx.api import \
+#     NyxStaticOutput, NyxFieldInfo, add_nyx_field
 
-from yt.frontends.orion.api import \
-    OrionStaticOutput, OrionFieldInfo, add_orion_field
+# from yt.frontends.orion.api import \
+#     OrionStaticOutput, OrionFieldInfo, add_orion_field
 
 from yt.frontends.flash.api import \
     FLASHStaticOutput, FLASHFieldInfo, add_flash_field


https://bitbucket.org/yt_analysis/yt-3.0/commits/31ede4c5c82d/
Changeset:   31ede4c5c82d
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-25 23:24:22
Summary:     Merging from yt-3.0 tip
Affected #:  7 files

diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -327,7 +327,7 @@
         icoords = chunk.icoords
         i1 = icoords[:,x_dict[self.axis]]
         i2 = icoords[:,y_dict[self.axis]]
-        ilevel = chunk.ires
+        ilevel = chunk.ires * self.pf.ires_factor
         tree.initialize_chunk(i1, i2, ilevel)
 
     def _handle_chunk(self, chunk, fields, tree):
@@ -347,7 +347,7 @@
         icoords = chunk.icoords
         i1 = icoords[:,x_dict[self.axis]]
         i2 = icoords[:,y_dict[self.axis]]
-        ilevel = chunk.ires
+        ilevel = chunk.ires * self.pf.ires_factor
         tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
 
     def to_pw(self, fields=None, center='c', width=None, axes_unit=None, 

diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -314,6 +314,13 @@
             return self._last_finfo
         raise YTFieldNotFound((ftype, fname), self)
 
+    @property
+    def ires_factor(self):
+        o2 = np.log2(self.refine_by)
+        if o2 != int(o2):
+            raise RuntimeError
+        return int(o2)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)

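A worked example of the ires_factor property just added: with refine_by = 4, np.log2(4) == 2.0 is integral, so ires_factor is 2 and the scaled ilevel = chunk.ires * pf.ires_factor above counts levels in factor-of-two refinements; a non-power-of-two refine_by such as 3 would raise RuntimeError.

    import numpy as np

    refine_by = 4                 # assumed value, for illustration
    o2 = np.log2(refine_by)       # 2.0
    assert o2 == int(o2)          # passes, so ires_factor would return 2
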
diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -21,6 +21,7 @@
       load_uniform_grid, \
       load_amr_grids, \
       load_particles, \
+      load_hexahedral_mesh, \
       refine_amr
 
 from .fields import \

diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -16,6 +16,7 @@
 import weakref
 import numpy as np
 import uuid
+from itertools import chain, product
 
 from yt.utilities.io_handler import io_registry
 from yt.funcs import *
@@ -26,6 +27,8 @@
     GridGeometryHandler
 from yt.geometry.particle_geometry_handler import \
     ParticleGeometryHandler
+from yt.geometry.unstructured_mesh_handler import \
+           UnstructuredGeometryHandler
 from yt.data_objects.static_output import \
     StaticOutput
 from yt.utilities.logger import ytLogger as mylog
@@ -41,6 +44,8 @@
     FlaggingGrid
 from yt.frontends.sph.data_structures import \
     ParticleFile
+from yt.data_objects.unstructured_mesh import \
+           SemiStructuredMesh
 
 from .fields import \
     StreamFieldInfo, \
@@ -829,3 +834,142 @@
 
     return spf
 
+def hexahedral_connectivity(xgrid, ygrid, zgrid):
+    nx = len(xgrid)
+    ny = len(ygrid)
+    nz = len(zgrid)
+    coords = np.fromiter(chain.from_iterable(product(xgrid, ygrid, zgrid)),
+                    dtype=np.float64, count = nx*ny*nz*3)
+    coords.shape = (nx*ny*nz, 3)
+    cis = np.fromiter(chain.from_iterable(product([0,1], [0,1], [0,1])),
+                    dtype=np.int64, count = 8*3)
+    cis.shape = (8, 3)
+    cycle = np.fromiter(chain.from_iterable(product(*map(range, (nx-1, ny-1, nz-1)))),
+                    dtype=np.int64, count = (nx-1)*(ny-1)*(nz-1)*3)
+    cycle.shape = ((nx-1)*(ny-1)*(nz-1), 3)
+    off = cis + cycle[:, np.newaxis]
+    connectivity = ((off[:,:,0] * ny) + off[:,:,1]) * nz + off[:,:,2]
+    return coords, connectivity
+
+class StreamHexahedralMesh(SemiStructuredMesh):
+    _connectivity_length = 8
+    _index_offset = 0
+
+class StreamHexahedralHierarchy(UnstructuredGeometryHandler):
+
+    def __init__(self, pf, data_style = None):
+        self.stream_handler = pf.stream_handler
+        super(StreamHexahedralHierarchy, self).__init__(pf, data_style)
+
+    def _initialize_mesh(self):
+        coords = self.stream_handler.fields.pop('coordinates')
+        connec = self.stream_handler.fields.pop('connectivity')
+        self.meshes = [StreamHexahedralMesh(0,
+          self.hierarchy_filename, connec, coords, self)]
+
+    def _setup_data_io(self):
+        if self.stream_handler.io is not None:
+            self.io = self.stream_handler.io
+        else:
+            self.io = io_registry[self.data_style](self.stream_handler)
+
+    def _detect_fields(self):
+        self.field_list = list(set(self.stream_handler.get_fields()))
+
+class StreamHexahedralStaticOutput(StreamStaticOutput):
+    _hierarchy_class = StreamHexahedralHierarchy
+    _fieldinfo_fallback = StreamFieldInfo
+    _fieldinfo_known = KnownStreamFields
+    _data_style = "stream_hexahedral"
+
+def load_hexahedral_mesh(data, connectivity, coordinates,
+                         sim_unit_to_cm, bbox=None,
+                         sim_time=0.0, periodicity=(True, True, True)):
+    r"""Load a hexahedral mesh of data into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamHandler`.
+
+    This should allow a semistructured grid of data to be loaded directly into
+    yt and analyzed as any other dataset would be.  This comes with several caveats:
+        * Units will be incorrect unless the data has already been converted to
+          cgs.
+        * Some functions may behave oddly, and parallelism will be
+          disappointing or non-existent in most cases.
+        * Particles may be difficult to integrate.
+
+    Particle fields are detected as one-dimensional fields. The number of particles
+    is set by the "number_of_particles" key in data.
+    
+    Parameters
+    ----------
+    data : dict
+        This is a dict of numpy arrays, where the keys are the field names.
+        Only a single mesh's worth of data may be supplied.
+    connectivity : array_like
+        This should be of size (N,8) where N is the number of zones.
+    coordinates : array_like
+        This should be of size (M,3) where M is the number of vertices
+        indicated in the connectivity matrix.
+    sim_unit_to_cm : float
+        Conversion factor from simulation units to centimeters
+    bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of computational domain in units of sim_unit_to_cm
+    sim_time : float, optional
+        The simulation time in seconds
+    periodicity : tuple of booleans
+        Determines whether the data will be treated as periodic along
+        each axis
+
+    """
+
+    domain_dimensions = np.ones(3, "int32") * 2
+    nprocs = 1
+    if bbox is None:
+        bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+    domain_left_edge = np.array(bbox[:, 0], 'float64')
+    domain_right_edge = np.array(bbox[:, 1], 'float64')
+    grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+    sfh = StreamDictFieldHandler()
+    
+    particle_types = set_particle_types(data)
+    
+    sfh.update({'connectivity': connectivity,
+                'coordinates': coordinates,
+                0: data})
+    grid_left_edges = domain_left_edge
+    grid_right_edges = domain_right_edge
+    grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+    # I'm not sure we need any of this.
+    handler = StreamHandler(
+        grid_left_edges,
+        grid_right_edges,
+        grid_dimensions,
+        grid_levels,
+        -np.ones(nprocs, dtype='int64'),
+        np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+        np.zeros(nprocs).reshape((nprocs,1)),
+        sfh,
+        particle_types=particle_types,
+        periodicity=periodicity
+    )
+
+    handler.name = "HexahedralMeshData"
+    handler.domain_left_edge = domain_left_edge
+    handler.domain_right_edge = domain_right_edge
+    handler.refine_by = 2
+    handler.dimensionality = 3
+    handler.domain_dimensions = domain_dimensions
+    handler.simulation_time = sim_time
+    handler.cosmology_simulation = 0
+
+    spf = StreamHexahedralStaticOutput(handler)
+    spf.units["cm"] = sim_unit_to_cm
+    spf.units['1'] = 1.0
+    spf.units["unitary"] = 1.0
+    box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+    for unit in mpc_conversion.keys():
+        spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+    return spf
+

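A minimal end-to-end sketch of the new entry point, patterned on the test added later in this merge; the field name and mesh dimensions are arbitrary:

    import numpy as np
    from yt.frontends.stream.api import load_hexahedral_mesh
    from yt.frontends.stream.data_structures import hexahedral_connectivity

    Nx, Ny, Nz = 8, 8, 8
    xs = np.linspace(0.0, 1.0, Nx + 1)     # vertex positions; any monotone
    ys = np.linspace(0.0, 1.0, Ny + 1)     # spacing works, which is the point
    zs = np.linspace(0.0, 1.0, Nz + 1)     # of a semistructured mesh
    coords, conn = hexahedral_connectivity(xs, ys, zs)   # (M,3), (N,8)
    data = {"density": np.random.random(Nx * Ny * Nz)}   # one value per zone
    bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
    pf = load_hexahedral_mesh(data, conn, coords, 1.0, bbox=bbox)
    dd = pf.h.all_data()                   # Nx*Ny*Nz zones
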
diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -199,3 +199,32 @@
 
     def _identify_fields(self, data_file):
         return [ ("all", k) for k in self.fields[data_file.filename].keys()]
+
+class IOHandlerStreamHexahedral(BaseIOHandler):
+    _data_style = "stream_hexahedral"
+
+    def __init__(self, stream_handler):
+        self.fields = stream_handler.fields
+        BaseIOHandler.__init__(self)
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        assert(len(chunks) == 1)
+        chunk = chunks[0]
+        rv = {}
+        for field in fields:
+            ftype, fname = field
+            rv[field] = np.empty(size, dtype="float64")
+        ngrids = sum(len(chunk.objs) for chunk in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s blocks",
+                    size, [fname for ftype, fname in fields], ngrids)
+        for field in fields:
+            ind = 0
+            ftype, fname = field
+            for chunk in chunks:
+                for g in chunk.objs:
+                    ds = self.fields[g.mesh_id].get(field, None)
+                    if ds is None:
+                        ds = self.fields[g.mesh_id][fname]
+                    ind += g.select(selector, ds, rv[field], ind) # caches
+        return rv

diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/frontends/stream/tests/test_stream_hexahedral.py
--- /dev/null
+++ b/yt/frontends/stream/tests/test_stream_hexahedral.py
@@ -0,0 +1,56 @@
+import numpy as np
+from yt.mods import *
+from yt.testing import *
+from yt.frontends.stream.api import \
+    load_hexahedral_mesh, load_uniform_grid
+from yt.frontends.stream.data_structures import \
+    hexahedral_connectivity
+
+def setup():
+    pass
+
+# Field information
+
+def test_stream_hexahedral():
+    np.random.seed(0x4d3d3d3)
+    Nx, Ny, Nz = 32, 18, 24
+    # Note what we're doing here -- we are creating a randomly spaced mesh, but
+    # because of how the accumulate operation works, we also reset the leftmost
+    # cell boundary to 0.0.
+    cell_x = np.random.random(Nx+1)
+    cell_x /= cell_x.sum()
+    cell_x = np.add.accumulate(cell_x)
+    cell_x[0] = 0.0
+
+    cell_y = np.random.random(Ny+1)
+    cell_y /= cell_y.sum()
+    cell_y = np.add.accumulate(cell_y)
+    cell_y[0] = 0.0
+
+    cell_z = np.random.random(Nz+1)
+    cell_z /= cell_z.sum()
+    cell_z = np.add.accumulate(cell_z)
+    cell_z[0] = 0.0
+
+    coords, conn = hexahedral_connectivity(cell_x, cell_y, cell_z)
+    data = {'random_field': np.random.random(Nx*Ny*Nz)}
+    bbox = np.array([ [0.0, 1.0], [0.0, 1.0], [0.0, 1.0] ])
+    ds = load_hexahedral_mesh(data, conn, coords, 1.0, bbox=bbox)
+    dd = ds.h.all_data()
+    yield assert_almost_equal, dd["CellVolumeCode"].sum(dtype="float64"), 1.0
+    yield assert_equal, dd["Ones"].size, Nx * Ny * Nz
+    # Now we try it with a standard mesh
+    cell_x = np.linspace(0.0, 1.0, Nx+1)
+    cell_y = np.linspace(0.0, 1.0, Ny+1)
+    cell_z = np.linspace(0.0, 1.0, Nz+1)
+    coords, conn = hexahedral_connectivity(cell_x, cell_y, cell_z)
+    data = {'random_field': np.random.random(Nx*Ny*Nz)}
+    bbox = np.array([ [0.0, 1.0], [0.0, 1.0], [0.0, 1.0] ])
+    ds = load_hexahedral_mesh(data, conn, coords, 1.0, bbox=bbox)
+    dd = ds.h.all_data()
+    yield assert_almost_equal, dd["CellVolumeCode"].sum(dtype="float64"), 1.0
+    yield assert_equal, dd["Ones"].size, Nx * Ny * Nz
+    yield assert_almost_equal, dd["dx"], 1.0/Nx
+    yield assert_almost_equal, dd["dy"], 1.0/Ny
+    yield assert_almost_equal, dd["dz"], 1.0/Nz

diff -r 4bde9f2ae227f0b5fcb04b95d26a11d1ac166c3d -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -114,7 +114,7 @@
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \
-    load_particles
+    load_particles, load_hexahedral_mesh
 
 from yt.frontends.sph.api import \
     OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \


https://bitbucket.org/yt_analysis/yt-3.0/commits/1a6831808308/
Changeset:   1a6831808308
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-26 00:37:07
Summary:     Implementing _read_fluid_selection for Boxlib codes.

It's not yet clear to me if we are better off incrementally seeking or just
seeking with respect to the start of the file.
Affected #:  2 files

diff -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 -r 1a68318083080751b989653c24b6284433971955 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -36,6 +36,8 @@
     get_box_grids_level
 from yt.geometry.selection_routines import \
     RegionSelector
+from yt.utilities.io_handler import \
+    io_registry
 
 from .definitions import \
     orion2enzoDict, \
@@ -59,11 +61,12 @@
 
 class BoxlibGrid(AMRGridPatch):
     _id_offset = 0
+    _offset = -1
 
     def __init__(self, grid_id, offset, filename = None,
                  hierarchy = None):
         super(BoxlibGrid, self).__init__(grid_id, filename, hierarchy)
-        self._offset = offset
+        self._base_offset = offset
         self._parent_id = []
         self._children_ids = []
 
@@ -96,6 +99,16 @@
         return [self.hierarchy.grids[cid - self._id_offset]
                 for cid in self._children_ids]
 
+    def _seek(self, f):
+        # This will either seek to the _offset or figure out the correct
+        # _offset.
+        if self._offset == -1:
+            f.seek(self._base_offset, os.SEEK_SET)
+            f.readline()
+            self._offset = f.tell()
+        else:
+            f.seek(self._offset)
+
 class BoxlibHierarchy(GridGeometryHandler):
     grid = BoxlibGrid
     def __init__(self, pf, data_style='boxlib_native'):
@@ -290,6 +303,9 @@
         GridGeometryHandler._setup_classes(self, dd)
         self.object_types.sort()
 
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
 class BoxlibStaticOutput(StaticOutput):
     """
     This class is a stripped down class that simply reads and parses

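The _seek method above discovers the true data offset lazily: _base_offset points at the FAB header line, and the first seek reads past that line and caches f.tell(). A minimal sketch of that pattern, with an invented filename and an assumed base_offset:

    import os

    with open("Cell_D_0000", "rb") as f:   # hypothetical multiFAB file
        f.seek(base_offset, os.SEEK_SET)   # base_offset: header position, assumed known
        f.readline()                       # skip the FAB header line
        data_offset = f.tell()             # cached as grid._offset for later reads
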
diff -r 31ede4c5c82dc1e7607c5990a251bd11674c60d9 -r 1a68318083080751b989653c24b6284433971955 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -15,35 +15,72 @@
 
 import os
 import numpy as np
-from yt.utilities.lib import read_castro_particles, read_and_seek
+from yt.utilities.lib import \
+    read_castro_particles, \
+    read_and_seek
 from yt.utilities.io_handler import \
            BaseIOHandler
+from yt.funcs import mylog, defaultdict
 
 class IOHandlerBoxlib(BaseIOHandler):
 
     _data_style = "boxlib_native"
 
-    def modify(self, field):
-        return field.swapaxes(0,2)
+    def __init__(self, pf, *args, **kwargs):
+        self.pf = pf
 
-    def _read_data(self, grid, field):
-        """
-        reads packed multiFABs output by BoxLib in "NATIVE" format.
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        rv = {}
+        for field in fields:
+            rv[field] = np.empty(size, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                    size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, fields)
+            for g in chunk.objs:
+                for field in fields:
+                    ftype, fname = field
+                    ds = data[g.id].pop(fname)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
+                ind += nd
+                data.pop(g.id)
+        return rv
 
-        """
-
-        filen = os.path.expanduser(grid.filename)
-        offset1 = grid._offset
-        dtype = grid.hierarchy._dtype
-        bpr = grid.hierarchy._dtype.itemsize
-        ne = grid.ActiveDimensions.prod()
-        field_index = grid.hierarchy.field_indexes[field]
-        offset2 = int(ne*bpr*field_index)
-
-        field = np.empty(grid.ActiveDimensions, dtype=dtype, order='F')
-        read_and_seek(filen, offset1, offset2, field, ne * bpr)
-
-        return field
+    def _read_chunk_data(self, chunk, fields):
+        data = {}
+        grids_by_file = defaultdict(list)
+        if len(chunk.objs) == 0: return data
+        for g in chunk.objs:
+            if g.filename is None:
+                continue
+            grids_by_file[g.filename].append(g)
+        dtype = self.pf.hierarchy._dtype
+        bpr = dtype.itemsize
+        field_indices = self.pf.hierarchy.field_indexes
+        field_list = set(f[1] for f in fields)
+        for filename in grids_by_file:
+            grids = grids_by_file[filename]
+            grids.sort(key = lambda a: a._base_offset)
+            f = open(filename, "rb")
+            for grid in grids:
+                data[grid.id] = {}
+                grid._seek(f)
+                count = grid.ActiveDimensions.prod()
+                size = count * bpr
+                for field in field_indices:
+                    if field in field_list:
+                        # We read it ...
+                        v = np.fromfile(f, dtype=dtype, count=count)
+                        v = v.reshape(grid.ActiveDimensions, order='F')
+                        data[grid.id][field] = v
+                    else:
+                        f.seek(size, os.SEEK_CUR)
+            f.close()
+        return data
 
 class IOHandlerOrion(IOHandlerBoxlib):
     _data_style = "orion_native"

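On the incremental-versus-absolute seek question raised in the commit message below, the two options in miniature; f, grid_offset, field_index and field_bytes are stand-in names, not yt API:

    import os

    # absolute: recompute the position from the start of the file each time;
    # robust, but every read pays for the arithmetic
    f.seek(grid_offset + field_index * field_bytes, os.SEEK_SET)

    # incremental: skip unwanted fields from wherever we are, as
    # _read_chunk_data does; cheaper when grids are sorted by file offset,
    # but any bookkeeping error compounds
    f.seek(field_bytes, os.SEEK_CUR)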

https://bitbucket.org/yt_analysis/yt-3.0/commits/ef7f24c0e576/
Changeset:   ef7f24c0e576
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-26 18:44:23
Summary:     Removing some of the level_Offset business and simplifying logic.
Affected #:  1 file

diff -r 1a68318083080751b989653c24b6284433971955 -r ef7f24c0e576c690ac566c6e7ac5649a0bdf4e88 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -109,6 +109,14 @@
         else:
             f.seek(self._offset)
 
+    # We override here because we can have varying refinement levels
+    def select_ires(self, dobj):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty(0, dtype='int64')
+        coords = np.empty(self._last_count, dtype='int64')
+        coords[:] = self.Level + self.pf.level_offsets[self.Level]
+        return coords
+
 class BoxlibHierarchy(GridGeometryHandler):
     grid = BoxlibGrid
     def __init__(self, pf, data_style='boxlib_native'):
@@ -255,13 +263,6 @@
             for child in grid.Children:
                 child._parent_id.append(i + grid._id_offset)
 
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return np.where(mask)
-
     def _count_grids(self):
         # We can get everything from the Header file, but note that we're
         # duplicating some work done elsewhere.  In a future where we don't
@@ -489,13 +490,29 @@
                                 header_file.readline().split()])
         if ref_factors.size == 0:
             # We use a default of two, as Nyx doesn't always output this value
-            ref_factors = [2]
+            ref_factors = [2] * self._max_level
+        # We can't vary refinement factors based on dimension, or whatever else
+        # they might be varied on.  Curiously, some Castro 3D data has only two
+        # refinement factors, which I don't know how to understand.
-        assert(np.unique(ref_factors).size == 1)
-        self.refine_by = ref_factors[0]
+        self.ref_factors = ref_factors
+        if np.unique(ref_factors).size > 1:
+            # We want everything to be a multiple of this.
+            self.refine_by = min(ref_factors)
+            # Check that they're all multiples of the minimum.
+            if not all(float(rf)/self.refine_by ==
+                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                raise RuntimeError
+            base_log = np.log2(self.refine_by)
+            self.level_offsets = [0] # level 0 has to have 0 offset
+            lo = 0
+            for lm1, rf in enumerate(self.ref_factors):
+                lo += int(np.log2(rf) / base_log) - 1
+                self.level_offsets.append(lo)
+        else:
+            self.refine_by = ref_factors[0]
+            self.level_offsets = [0 for l in range(self._max_level + 1)]
         # Now we read the global index space, to get 
         index_space = header_file.readline()
         # This will be of the form:

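A worked example of the new level_offsets bookkeeping, assuming mixed per-level refinement factors ref_factors = [2, 4]: refine_by = min(...) = 2, base_log = 1.0, and the loop yields level_offsets = [0, 0, 1], so select_ires reports a level-2 cell at index-space level 2 + 1 = 3, i.e. three factor-of-two refinements.

    import numpy as np

    ref_factors = [2, 4]              # assumed, for illustration
    refine_by = min(ref_factors)      # 2
    base_log = np.log2(refine_by)     # 1.0
    level_offsets, lo = [0], 0
    for rf in ref_factors:
        lo += int(np.log2(rf) / base_log) - 1
        level_offsets.append(lo)
    print(level_offsets)              # [0, 0, 1]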

https://bitbucket.org/yt_analysis/yt-3.0/commits/30f9b4536566/
Changeset:   30f9b4536566
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-26 18:44:52
Summary:     Merging from tip, with grid refactor
Affected #:  26 files

diff -r ef7f24c0e576c690ac566c6e7ac5649a0bdf4e88 -r 30f9b4536566109133f6da6fd9b2ac528b560ca6 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -71,7 +71,7 @@
         """
         if self.start_index is not None:
             return self.start_index
-        if self.Parent == None:
+        if self.Parent is None:
             left = self.LeftEdge - self.pf.domain_left_edge
             start_index = left / self.dds
             return np.rint(start_index).astype('int64').ravel()
@@ -132,51 +132,6 @@
         if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
         if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
 
-    @property
-    def _corners(self):
-        return np.array([ # Unroll!
-            [self.LeftEdge[0],  self.LeftEdge[1],  self.LeftEdge[2]],
-            [self.RightEdge[0], self.LeftEdge[1],  self.LeftEdge[2]],
-            [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
-            [self.RightEdge[0], self.RightEdge[1], self.RightEdge[2]],
-            [self.LeftEdge[0],  self.RightEdge[1], self.RightEdge[2]],
-            [self.LeftEdge[0],  self.LeftEdge[1],  self.RightEdge[2]],
-            [self.RightEdge[0], self.LeftEdge[1],  self.RightEdge[2]],
-            [self.LeftEdge[0],  self.RightEdge[1], self.LeftEdge[2]],
-            ], dtype='float64')
-
-    def _generate_overlap_masks(self, axis, LE, RE):
-        """
-        Generate a mask that shows which cells overlap with arbitrary arrays
-        *LE* and *RE*) of edges, typically grids, along *axis*.
-        Use algorithm described at http://www.gamedev.net/reference/articles/article735.asp
-
-        """
-        x = x_dict[axis]
-        y = y_dict[axis]
-        cond = self.RightEdge[x] >= LE[:,x]
-        cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
-        cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
-        cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
-        return cond
-
-    def is_in_grid(self, x, y, z) :
-        """
-        Generate a mask that shows which points in *x*, *y*, and *z*
-        fall within this grid's boundaries.
-        """
-        xcond = np.logical_and(x >= self.LeftEdge[0],
-                               x < self.RightEdge[0])
-        ycond = np.logical_and(y >= self.LeftEdge[1],
-                               y < self.RightEdge[1])
-        zcond = np.logical_and(z >= self.LeftEdge[2],
-                               z < self.RightEdge[2])
-
-        cond = np.logical_and(xcond, ycond)
-        cond = np.logical_and(zcond, cond)
-
-        return cond
-        
     def __repr__(self):
         return "AMRGridPatch_%04i" % (self.id)
 
@@ -190,13 +145,8 @@
 
         """
         super(AMRGridPatch, self).clear_data()
-        self._del_child_mask()
-        self._del_child_indices()
         self._setup_dx()
 
-    def check_child_masks(self):
-        return self._child_mask, self._child_indices
-
     def _prepare_grid(self):
         """ Copies all the appropriate attributes from the hierarchy. """
         # This is definitely the slowest part of generating the hierarchy
@@ -212,89 +162,12 @@
         #self.Time = h.gridTimes[my_ind,0]
         self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
 
-    def find_max(self, field):
-        """ Returns value, index of maximum value of *field* in this grid. """
-        coord1d = (self[field] * self.child_mask).argmax()
-        coord = np.unravel_index(coord1d, self[field].shape)
-        val = self[field][coord]
-        return val, coord
-
-    def find_min(self, field):
-        """ Returns value, index of minimum value of *field* in this grid. """
-        coord1d = (self[field] * self.child_mask).argmin()
-        coord = np.unravel_index(coord1d, self[field].shape)
-        val = self[field][coord]
-        return val, coord
-
     def get_position(self, index):
         """ Returns center position of an *index*. """
         pos = (index + 0.5) * self.dds + self.LeftEdge
         return pos
 
-    def clear_all(self):
-        """
-        Clears all datafields from memory and calls
-        :meth:`clear_derived_quantities`.
-
-        """
-        for key in self.keys():
-            del self.field_data[key]
-        del self.field_data
-        if hasattr(self,"retVal"):
-            del self.retVal
-        self.field_data = YTFieldData()
-        self.clear_derived_quantities()
-        del self.child_mask
-        del self.child_ind
-
-    def _set_child_mask(self, newCM):
-        if self._child_mask != None:
-            mylog.warning("Overriding child_mask attribute!  This is probably unwise!")
-        self._child_mask = newCM
-
-    def _set_child_indices(self, newCI):
-        if self._child_indices != None:
-            mylog.warning("Overriding child_indices attribute!  This is probably unwise!")
-        self._child_indices = newCI
-
-    def _get_child_mask(self):
-        if self._child_mask == None:
-            self.__generate_child_mask()
-        return self._child_mask
-
-    def _get_child_indices(self):
-        if self._child_indices == None:
-            self.__generate_child_mask()
-        return self._child_indices
-
-    def _del_child_indices(self):
-        try:
-            del self._child_indices
-        except AttributeError:
-            pass
-        self._child_indices = None
-
-    def _del_child_mask(self):
-        try:
-            del self._child_mask
-        except AttributeError:
-            pass
-        self._child_mask = None
-
-    def _get_child_index_mask(self):
-        if self._child_index_mask is None:
-            self.__generate_child_index_mask()
-        return self._child_index_mask
-
-    def _del_child_index_mask(self):
-        try:
-            del self._child_index_mask
-        except AttributeError:
-            pass
-        self._child_index_mask = None
-
-    #@time_execution
-    def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
+    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
         rf = self.pf.refine_by
         if dlevel != 1:
             rf = rf**dlevel
@@ -307,61 +180,37 @@
              startIndex[1]:endIndex[1],
              startIndex[2]:endIndex[2]] = tofill
 
-    def __generate_child_mask(self):
+    @property
+    def child_mask(self):
         """
         Generates self.child_mask, which is zero where child grids exist (and
         thus, where higher resolution data is available).
 
         """
-        self._child_mask = np.ones(self.ActiveDimensions, 'bool')
+        child_mask = np.ones(self.ActiveDimensions, 'bool')
         for child in self.Children:
-            self.__fill_child_mask(child, self._child_mask, 0)
-        if self.OverlappingSiblings is not None:
-            for sibling in self.OverlappingSiblings:
-                self.__fill_child_mask(sibling, self._child_mask, 0)
-        
-        self._child_indices = (self._child_mask==0) # bool, possibly redundant
+            self._fill_child_mask(child, child_mask, 0)
+        for sibling in self.OverlappingSiblings or []:
+            self._fill_child_mask(sibling, child_mask, 0)
+        return child_mask
 
-    def __generate_child_index_mask(self):
+    @property
+    def child_indices(self):
+        return (self.child_mask == 0)
+
+    @property
+    def child_index_mask(self):
         """
         Generates self.child_index_mask, which is -1 where there is no child,
         and otherwise has the ID of the grid that resides there.
 
         """
-        self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
+        child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
         for child in self.Children:
-            self.__fill_child_mask(child, self._child_index_mask,
-                                   child.id)
-        if self.OverlappingSiblings is not None:
-            for sibling in self.OverlappingSiblings:
-                self.__fill_child_mask(sibling, self._child_index_mask,
-                                       sibling.id)
-
-    def _get_coords(self):
-        if self.__coords == None: self._generate_coords()
-        return self.__coords
-
-    def _set_coords(self, new_c):
-        if self.__coords != None:
-            mylog.warning("Overriding coords attribute!  This is probably unwise!")
-        self.__coords = new_c
-
-    def _del_coords(self):
-        del self.__coords
-        self.__coords = None
-
-    def _generate_coords(self):
-        """
-        Creates self.coords, which is of dimensions (3, ActiveDimensions)
-
-        """
-        ind = np.indices(self.ActiveDimensions)
-        left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
-        self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
-
-    child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
-    child_index_mask = property(fget=_get_child_index_mask, fdel=_del_child_index_mask)
-    child_indices = property(fget=_get_child_indices, fdel = _del_child_indices)
+            self._fill_child_mask(child, child_index_mask, child.id)
+        for sibling in self.OverlappingSiblings or []:
+            self._fill_child_mask(sibling, child_index_mask, sibling.id)
+        return child_index_mask
 
     def retrieve_ghost_zones(self, n_zones, fields, all_levels=False,
                              smoothed=False):

diff -r ef7f24c0e576c690ac566c6e7ac5649a0bdf4e88 -r 30f9b4536566109133f6da6fd9b2ac528b560ca6 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -30,6 +30,16 @@
 import yt.geometry.particle_smooth as particle_smooth
 from yt.funcs import *
 
+def cell_count_cache(func):
+    def cc_cache_func(self, dobj):
+        if hash(dobj.selector) != self._last_selector_id:
+            self._cell_count = -1
+        rv = func(self, dobj)
+        self._cell_count = rv.shape[0]
+        self._last_selector_id = hash(dobj.selector)
+        return rv
+    return cc_cache_func
+
 class OctreeSubset(YTSelectionContainer):
     _spatial = True
     _num_ghost_zones = 0
@@ -38,7 +48,7 @@
     _con_args = ('base_region', 'domain', 'pf')
     _container_fields = ("dx", "dy", "dz")
     _domain_offset = 0
-    _num_octs = -1
+    _cell_count = -1
 
     def __init__(self, base_region, domain, pf, over_refine_factor = 1):
         self._num_zones = 1 << (over_refine_factor)
@@ -161,37 +171,25 @@
             vals = np.asfortranarray(vals)
         return vals
 
+    @cell_count_cache
     def select_icoords(self, dobj):
-        d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fcoords(self, dobj):
-        d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
-                                     num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fcoords(dobj.selector, domain_id = self.domain_id,
+                                        num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_fwidth(self, dobj):
-        d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.fwidth(dobj.selector, domain_id = self.domain_id,
+                                       num_cells = self._cell_count)
 
+    @cell_count_cache
     def select_ires(self, dobj):
-        d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
-                                  num_octs = self._num_octs)
-        self._num_octs = d.shape[0] / (self.nz**3)
-        tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
-                                            domain_id = self.domain_id)
-        return tr
+        return self.oct_handler.ires(dobj.selector, domain_id = self.domain_id,
+                                     num_cells = self._cell_count)
 
     def select(self, selector, source, dest, offset):
         n = self.oct_handler.selector_fill(selector, source, dest, offset,
@@ -199,11 +197,7 @@
         return n
 
     def count(self, selector):
-        if hash(selector) == self._last_selector_id:
-            if self._last_mask is None: return 0
-            return self._last_mask.sum()
-        self.select(selector)
-        return self.count(selector)
+        return -1
 
     def count_particles(self, selector, x, y, z):
         # We don't cache the selector results

diff -r ef7f24c0e576c690ac566c6e7ac5649a0bdf4e88 -r 30f9b4536566109133f6da6fd9b2ac528b560ca6 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -141,50 +141,6 @@
         else:
             raise KeyError(field)
 
-    def _get_data_from_grid(self, grid, field):
-        if self.pf.geometry == "cylindrical":
-            if grid.id in self._masks:
-                mask = self._masks[grid.id] 
-            else:
-                mask = self._get_cut_mask(grid)
-            ts, dts = self._ts[grid.id], self._dts[grid.id]
-        else:
-            mask = np.logical_and(self._get_cut_mask(grid), grid.child_mask)
-            ts, dts = self._ts[grid.id][mask], self._dts[grid.id][mask]
-
-        if field == 'dts':
-            return dts
-        if field == 't': 
-            return ts
-
-        gf = grid[field]
-        if not iterable(gf):
-            gf = gf * np.ones(grid.child_mask.shape)
-        return gf[mask]
-
-    def _get_cut_mask(self, grid):
-        if self.pf.geometry == "cylindrical":
-            _ = clyindrical_ray_trace(self.start_point, self.end_point, 
-                                      grid.LeftEdge, grid.RightEdge)
-            ts, s, rzt, mask = _
-            dts = np.empty(ts.shape, dtype='float64')
-            dts[0], dts[1:] = 0.0, ts[1:] - ts[:-1]
-            grid['r'], grid['z'], grid['theta'] = rzt[:,0], rzt[:,1], rzt[:,2]
-            grid['s'] = s
-        else:
-            mask = np.zeros(grid.ActiveDimensions, dtype='int')
-            dts = np.zeros(grid.ActiveDimensions, dtype='float64')
-            ts = np.zeros(grid.ActiveDimensions, dtype='float64')
-            VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
-                           grid.dds, self.center, self.vec)
-            dts = np.abs(dts) 
-            ts = np.abs(ts)
-        self._dts[grid.id] = dts
-        self._ts[grid.id] = ts
-        self._masks[grid.id] = mask
-        return mask
-
-
 class YTSliceBase(YTSelectionContainer2D):
     """
     This is a data object corresponding to a slice through the simulation
@@ -249,10 +205,6 @@
         else:
             raise KeyError(field)
 
-    def _gen_node_name(self):
-        return "%s/%s_%s" % \
-            (self._top_node, self.axis, self.coord)
-
     @property
     def _mrep(self):
         return MinimalSliceData(self)
@@ -520,156 +472,6 @@
         frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
         return frb
 
-class YTFixedResCuttingPlaneBase(YTSelectionContainer2D):
-    """
-    The fixed resolution Cutting Plane slices at an oblique angle,
-    where we use the *normal* vector at the *center* to define the
-    viewing plane.  The plane is *width* units wide.  The 'up'
-    direction is guessed at automatically if not given.
-    """
-    _top_node = "/FixedResCuttingPlanes"
-    _type_name = "fixed_res_cutting"
-    _con_args = ('normal', 'center', 'width', 'dims')
-    def __init__(self, normal, center, width, dims, pf = None,
-                 node_name = None, field_parameters = None):
-        #
-        # Taken from Cutting Plane
-        #
-        YTSelectionContainer2D.__init__(self, 4, pf, field_parameters)
-        self._set_center(center)
-        self.width = width
-        self.dims = dims
-        self.dds = self.width / self.dims
-        self.bounds = np.array([0.0,1.0,0.0,1.0])
-
-        self.set_field_parameter('center', center)
-        # Let's set up our plane equation
-        # ax + by + cz + d = 0
-        self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
-        self._d = -1.0 * np.dot(self._norm_vec, self.center)
-        # First we try all three, see which has the best result:
-        vecs = np.identity(3)
-        _t = np.cross(self._norm_vec, vecs).sum(axis=1)
-        ax = _t.argmax()
-        self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
-        self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
-        self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
-        self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
-        self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
-        self._inv_mat = np.linalg.pinv(self._rot_mat)
-        self.set_field_parameter('cp_x_vec',self._x_vec)
-        self.set_field_parameter('cp_y_vec',self._y_vec)
-        self.set_field_parameter('cp_z_vec',self._norm_vec)
-
-        # Calculate coordinates of each pixel
-        _co = self.dds * \
-              (np.mgrid[-self.dims/2 : self.dims/2,
-                        -self.dims/2 : self.dims/2] + 0.5)
-        self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
-                      np.outer(_co[1,:,:], self._y_vec)
-        self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
-        if node_name is not False:
-            if node_name is True: self._deserialize()
-            else: self._deserialize(node_name)
-
-    @property
-    def normal(self):
-        return self._norm_vec
-
-    def _get_list_of_grids(self):
-        # Just like the Cutting Plane but restrict the grids to be
-        # within width/2 of the center.
-        vertices = self.hierarchy.gridCorners
-        # Shape = (8,3,n_grid)
-        D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
-        valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
-                                              np.all(D>0,axis=0) ))[0]
-        # Now restrict these grids to a rect. prism that bounds the slice
-        sliceCorners = np.array([ \
-            self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
-            self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
-            self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
-            self.center + 0.5*self.width * (-self._x_vec + self._y_vec) ])
-        sliceLeftEdge = sliceCorners.min(axis=0)
-        sliceRightEdge = sliceCorners.max(axis=0)
-        # Check for bounding box and grid overlap
-        leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
-                              sliceRightEdge).all(axis=1)
-        rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
-                                  sliceLeftEdge).all(axis=1)
-        self._grids = self.hierarchy.grids[valid_grids[
-            np.where(leftOverlap & rightOverlap)]]
-        self._grids = self._grids[::-1]
-
-    def _generate_coords(self):
-        self['px'] = self._coord[:,0].ravel()
-        self['py'] = self._coord[:,1].ravel()
-        self['pz'] = self._coord[:,2].ravel()
-        self['pdx'] = self.dds * 0.5
-        self['pdy'] = self.dds * 0.5
-        #self['pdz'] = self.dds * 0.5
-
-    def _get_data_from_grid(self, grid, field):
-        if not self.pf.field_info[field].particle_type:
-            pointI = self._get_point_indices(grid)
-            if len(pointI) == 0: return
-            vc = self._calc_vertex_centered_data(grid, field)
-            bds = np.array(zip(grid.LeftEdge,
-                               grid.RightEdge)).ravel()
-            interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
-            self[field][pointI] = interp( \
-                dict(x=self._coord[pointI,0],
-                     y=self._coord[pointI,1],
-                     z=self._coord[pointI,2])).ravel()
-
-            # Mark these pixels to speed things up
-            self._pixelmask[pointI] = 0
-
-            return
-        else:
-            raise SyntaxError("Making a fixed resolution slice with "
-                              "particles isn't supported yet.")
-
-    def get_data(self, fields):
-        """
-        Iterates over the list of fields and generates/reads them all.
-        """
-        self._get_list_of_grids()
-        if not self.has_key('pdx'):
-            self._generate_coords()
-        fields_to_get = ensure_list(fields)
-        temp_data = {}
-        _size = self.dims * self.dims
-        for field in fields_to_get:
-            if self.field_data.has_key(field): continue
-            if field not in self.hierarchy.field_list:
-                if self._generate_field(field):
-                    continue # A "True" return means we did it
-            self[field] = np.zeros(_size, dtype='float64')
-            for grid in self._get_grids():
-                self._get_data_from_grid(grid, field)
-            self[field] = self.comm.mpi_allreduce(\
-                self[field], op='sum').reshape([self.dims]*2).transpose()
-
-    def _calc_vertex_centered_data(self, grid, field):
-        #return grid.retrieve_ghost_zones(1, field, smoothed=False)
-        return grid.get_vertex_centered_data(field)
-
-    def _get_point_indices(self, grid):
-        if self._pixelmask.max() == 0: return []
-        k = planar_points_in_volume(self._coord, self._pixelmask,
-                                    grid.LeftEdge, grid.RightEdge,
-                                    grid.child_mask, just_one(grid['dx']))
-        return k
-
-    def _gen_node_name(self):
-        cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
-        L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
-        return "%s/c%s_L%s" % \
-            (self._top_node, cen_name, L_name)
-
-
 class YTDiskBase(YTSelectionContainer3D):
     """
     By providing a *center*, a *normal*, a *radius* and a *height* we
@@ -687,113 +489,6 @@
         self._radius = fix_length(radius, self.pf)
         self._d = -1.0 * np.dot(self._norm_vec, self.center)
 
-    def _get_list_of_grids(self):
-        H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
-                   axis=1) + self._d
-        D = np.sqrt(np.sum((self.pf.h.grid_corners -
-                           self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = np.sqrt(D**2.0-H**2.0)
-        self._grids = self.hierarchy.grids[
-            ( (np.any(np.abs(H)<self._height,axis=0))
-            & (np.any(R<self._radius,axis=0)
-            & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
-            ) ) ]
-        self._grids = self.hierarchy.grids
-
-    def _is_fully_enclosed(self, grid):
-        corners = grid._corners.reshape((8,3,1))
-        H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
-                   axis=1) + self._d
-        D = np.sqrt(np.sum((corners -
-                           self.center.reshape((1,3,1)))**2.0,axis=1))
-        R = np.sqrt(D**2.0-H**2.0)
-        return (np.all(np.abs(H) < self._height, axis=0) \
-            and np.all(R < self._radius, axis=0))
-
-    def _get_cut_mask(self, grid):
-        if self._is_fully_enclosed(grid):
-            return True
-        else:
-            h = grid['x'] * self._norm_vec[0] \
-              + grid['y'] * self._norm_vec[1] \
-              + grid['z'] * self._norm_vec[2] \
-              + self._d
-            d = np.sqrt(
-                (grid['x'] - self.center[0])**2.0
-              + (grid['y'] - self.center[1])**2.0
-              + (grid['z'] - self.center[2])**2.0
-                )
-            r = np.sqrt(d**2.0-h**2.0)
-            cm = ( (np.abs(h) <= self._height)
-                 & (r <= self._radius))
-        return cm
-
-
-class YTInclinedBoxBase(YTSelectionContainer3D):
-    """
-    A rectangular prism with arbitrary alignment to the computational
-    domain.  *origin* is the origin of the box, while *box_vectors* is an
-    array of ordering [ax, ijk] that describes the three vectors that
-    describe the box.  No checks are done to ensure that the box satisfies
-    a right-hand rule, but if it doesn't, behavior is undefined.
-    """
-    _type_name="inclined_box"
-    _con_args = ('origin','box_vectors')
-
-    def __init__(self, origin, box_vectors, fields=None,
-                 pf=None, **kwargs):
-        self.origin = np.array(origin)
-        self.box_vectors = np.array(box_vectors, dtype='float64')
-        self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
-        center = origin + 0.5*self.box_vectors.sum(axis=0)
-        YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
-        self._setup_rotation_parameters()
-
-    def _setup_rotation_parameters(self):
-        xv = self.box_vectors[0,:]
-        yv = self.box_vectors[1,:]
-        zv = self.box_vectors[2,:]
-        self._x_vec = xv / np.sqrt(np.dot(xv, xv))
-        self._y_vec = yv / np.sqrt(np.dot(yv, yv))
-        self._z_vec = zv / np.sqrt(np.dot(zv, zv))
-        self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
-        self._inv_mat = np.linalg.pinv(self._rot_mat)
-
-    def _get_list_of_grids(self):
-        if self._grids is not None: return
-        GLE = self.pf.h.grid_left_edge
-        GRE = self.pf.h.grid_right_edge
-        goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
-                                           GLE, GRE)
-        cgrids = self.pf.h.grids[goodI.astype('bool')]
-       # find_grids_in_inclined_box seems to be broken.
-        cgrids = self.pf.h.grids[:]
-        grids = []
-        for i,grid in enumerate(cgrids):
-            v = grid_points_in_volume(self.box_lengths, self.origin,
-                                      self._rot_mat, grid.LeftEdge,
-                                      grid.RightEdge, grid.dds,
-                                      grid.child_mask, 1)
-            if v: grids.append(grid)
-        self._grids = np.empty(len(grids), dtype='object')
-        for gi, g in enumerate(grids): self._grids[gi] = g
-
-
-    def _is_fully_enclosed(self, grid):
-        # This should be written at some point.
-        # We'd rotate all eight corners into the space of the box, then check to
-        # see if all are enclosed.
-        return False
-
-    def _get_cut_mask(self, grid):
-        if self._is_fully_enclosed(grid):
-            return True
-        pm = np.zeros(grid.ActiveDimensions, dtype='int32')
-        grid_points_in_volume(self.box_lengths, self.origin,
-                              self._rot_mat, grid.LeftEdge,
-                              grid.RightEdge, grid.dds, pm, 0)
-        return pm
-
 
 class YTRegionBase(YTSelectionContainer3D):
     """A 3D region of data with an arbitrary center.

diff -r ef7f24c0e576c690ac566c6e7ac5649a0bdf4e88 -r 30f9b4536566109133f6da6fd9b2ac528b560ca6 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1,17 +1,15 @@
-"""
-
-"""
 cimport cython
 import numpy as np
 cimport numpy as np
 import sys 
 
 from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.utilities.lib.fp_utils cimport imax
 from yt.geometry.oct_container cimport \
-    OctreeContainer, OctAllocationContainer, \
     SparseOctreeContainer
 from yt.geometry.oct_visitors cimport \
-    OctVisitorData, oct_visitor_function, Oct
+    OctVisitorData, oct_visitor_function, Oct, \
+    fill_file_indices_oind, fill_file_indices_rind
 from yt.geometry.particle_deposit cimport \
     ParticleDepositOperation
 from libc.stdint cimport int32_t, int64_t
@@ -66,6 +64,9 @@
     int artio_fileset_open_grid(artio_fileset_handle *handle) 
     int artio_fileset_close_grid(artio_fileset_handle *handle) 
 
+    int artio_fileset_has_grid( artio_fileset_handle *handle )
+    int artio_fileset_has_particles( artio_fileset_handle *handle )
+
     # selection functions
     artio_selection *artio_selection_allocate( artio_fileset_handle *handle )
     artio_selection *artio_select_all( artio_fileset_handle *handle )
@@ -136,12 +137,14 @@
     cdef int64_t sfc_min, sfc_max
 
     # grid attributes
+    cdef public int has_grid
     cdef public int min_level, max_level
     cdef public int num_grid_variables
     cdef int *num_octs_per_level
     cdef float *grid_variables
 
     # particle attributes
+    cdef public int has_particles
     cdef public int num_species
     cdef int *particle_position_index
     cdef int *num_particles_per_species
@@ -178,32 +181,48 @@
         if (not self.num_octs_per_level) or (not self.grid_variables) :
             raise MemoryError
 
-        status = artio_fileset_open_grid( self.handle )
-        check_artio_status(status)
+        if artio_fileset_has_grid(self.handle):
+            status = artio_fileset_open_grid(self.handle)
+            check_artio_status(status)
+            self.has_grid = 1
+        else:
+            self.has_grid = 0
 
         # particle detection
-        self.num_species = self.parameters['num_particle_species'][0]
-        self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
-        if not self.particle_position_index :
-            raise MemoryError
-        for ispec in range(self.num_species) :
-            labels = self.parameters["species_%02d_primary_variable_labels"% (ispec,)]
-            try :
-                self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
-                self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
-                self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
-            except ValueError :
-                raise RuntimeError("Unable to locate position information for particle species", ispec )
+        if ( artio_fileset_has_particles(self.handle) ):
+            status = artio_fileset_open_particles(self.handle)
+            check_artio_status(status)
+            self.has_particles = 1
 
-        self.num_particles_per_species =  <int *>malloc(sizeof(int)*self.num_species) 
-        self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))  
-        self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))  
-        if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
-            raise MemoryError
+            for v in ["num_particle_species","num_primary_variables","num_secondary_variables"]:
+                if not self.parameters.has_key(v):
+                    raise RuntimeError("Unable to locate particle header information in artio header: key=", v)
 
-        status = artio_fileset_open_particles( self.handle )
-        check_artio_status(status)
-   
+            self.num_species = self.parameters['num_particle_species'][0]
+            self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
+            if not self.particle_position_index :
+                raise MemoryError
+            for ispec in range(self.num_species) :
+                species_labels = "species_%02d_primary_variable_labels"% (ispec,)
+                if not self.parameters.has_key(species_labels):
+                    raise RuntimeError("Unable to locate variable labels for species",ispec)
+
+                labels = self.parameters[species_labels]
+                try :
+                    self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
+                    self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
+                    self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
+                except ValueError :
+                    raise RuntimeError("Unable to locate position information for particle species", ispec)
+    
+            self.num_particles_per_species =  <int *>malloc(sizeof(int)*self.num_species) 
+            self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))  
+            self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))  
+            if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
+                raise MemoryError
+        else:
+            self.has_particles = 0
+
     def __dealloc__(self) :
         if self.num_octs_per_level : free(self.num_octs_per_level)
         if self.grid_variables : free(self.grid_variables)
@@ -390,7 +409,6 @@
                 raise RuntimeError("Field",f,"is not known to ARTIO")
             field_order[i] = var_labels.index(f)
 
-        # dhr - cache the entire domain (replace later)
         status = artio_grid_cache_sfc_range( self.handle, self.sfc_min, self.sfc_max )
         check_artio_status(status) 
 
@@ -530,6 +548,85 @@
         artio_fileset_close(handle) 
     return True
 
+cdef class ARTIOSFCRangeHandler:
+    cdef public np.int64_t sfc_start
+    cdef public np.int64_t sfc_end
+    cdef public artio_fileset artio_handle
+    cdef public object root_mesh_handler
+    cdef public object oct_count
+    cdef public object octree_handler
+    cdef artio_fileset_handle *handle
+    cdef np.float64_t DLE[3]
+    cdef np.float64_t DRE[3]
+    cdef np.float64_t dds[3]
+    cdef np.int64_t dims[3]
+    cdef public np.int64_t total_octs
+
+    def __init__(self, domain_dimensions, # cells
+                 domain_left_edge,
+                 domain_right_edge,
+                 artio_fileset artio_handle,
+                 sfc_start, sfc_end):
+        cdef int i
+        self.sfc_start = sfc_start
+        self.sfc_end = sfc_end
+        self.artio_handle = artio_handle
+        self.root_mesh_handler = None
+        self.octree_handler = None
+        self.handle = artio_handle.handle
+        self.oct_count = None
+        for i in range(3):
+            self.dims[i] = domain_dimensions[i]
+            self.DLE[i] = domain_left_edge[i]
+            self.DRE[i] = domain_right_edge[i]
+            self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
+
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def construct_mesh(self):
+        cdef int status, level
+        cdef np.int64_t sfc, oc
+        cdef double dpos[3]
+        cdef int num_oct_levels
+        cdef int max_level = self.artio_handle.max_level
+        cdef int *num_octs_per_level = <int *>malloc(
+            (max_level + 1)*sizeof(int))
+        cdef ARTIOOctreeContainer octree
+        self.octree_handler = octree = ARTIOOctreeContainer(self)
+        # We want to pre-allocate an array of root pointers.  In the future,
+        # this will be pre-determined by the ARTIO library.  However, because
+        # realloc plays havoc with our tree searching, we can't utilize an
+        # expanding array at the present time.
+        octree.allocate_domains([], self.sfc_end - self.sfc_start + 1)
+        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        oct_count = np.zeros(self.sfc_end - self.sfc_start + 1, dtype="int64")
+        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+                                            self.sfc_end)
+        check_artio_status(status) 
+        for sfc in range(self.sfc_start, self.sfc_end + 1):
+            status = artio_grid_read_root_cell_begin( self.handle,
+                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+            check_artio_status(status)
+            if num_oct_levels > 0:
+                oc = 0
+                for level in range(num_oct_levels):
+                    oc += num_octs_per_level[level]
+                self.total_octs += oc
+                oct_count[sfc - self.sfc_start] = oc
+                octree.initialize_local_mesh(oc, num_oct_levels,
+                    num_octs_per_level, sfc)
+            status = artio_grid_read_root_cell_end( self.handle )
+            check_artio_status(status)
+        free(num_octs_per_level)
+        self.root_mesh_handler = ARTIORootMeshContainer(self)
+        self.oct_count = oct_count
+
+    def free_mesh(self):
+        self.octree_handler = None
+        self.root_mesh_handler = None
+        self.oct_count = None
+
 def get_coords(artio_fileset handle, np.int64_t s):
     cdef int coords[3]
     artio_sfc_coords(handle.handle, s, coords)
@@ -563,163 +660,91 @@
     # the file again, despite knowing the indexing system already.  Because of
     # this, we will avoid creating it as long as possible.
 
-    cdef public np.int64_t sfc_start
-    cdef public np.int64_t sfc_end
     cdef public artio_fileset artio_handle
-    cdef Oct **root_octs
-    cdef np.int64_t *level_indices
+    cdef np.int64_t level_indices[32]
 
-    def __init__(self, oct_dimensions, domain_left_edge, domain_right_edge,
-                 int64_t sfc_start, int64_t sfc_end, artio_fileset artio_handle):
-        self.artio_handle = artio_handle
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
+    def __init__(self, ARTIOSFCRangeHandler range_handler):
+        self.artio_handle = range_handler.artio_handle
         # Note the final argument is partial_coverage, which indicates whether
         # or not an Oct can be partially refined.
-        super(ARTIOOctreeContainer, self).__init__(oct_dimensions,
-            domain_left_edge, domain_right_edge)
-        self.level_indices = NULL
-        self._initialize_root_mesh()
+        dims, DLE, DRE = [], [], []
+        for i in range(32):
+            self.level_indices[i] = 0
+        for i in range(3):
+            # range_handler has dims in cells, which is the same as the number
+            # of possible octs.  This is because we have a forest of octrees.
+            dims.append(range_handler.dims[i])
+            DLE.append(range_handler.DLE[i])
+            DRE.append(range_handler.DRE[i])
+        super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
+        self.artio_handle = range_handler.artio_handle
+        self.level_offset = 1
+        self.domains = NULL
+        self.root_nodes = NULL
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
-    def _initialize_root_mesh(self):
+    cdef void initialize_local_mesh(self, np.int64_t oct_count,
+                              int num_oct_levels, int *num_octs_per_level,
+                              np.int64_t sfc):
         # We actually will not be initializing the root mesh here, we will be
         # initializing the entire mesh between sfc_start and sfc_end.
-        cdef np.int64_t oct_ind, sfc, nadded, tot_octs, ipos
-        cdef np.uint8_t bits
-        cdef int status
+        cdef np.int64_t oct_ind, tot_octs, ipos, nadded
+        cdef int i, status, level, num_root, num_octs
+        cdef int num_level_octs
         cdef artio_fileset_handle *handle = self.artio_handle.handle
+        cdef int coords[3]
+        cdef int max_level = self.artio_handle.max_level
         cdef double dpos[3]
-        cdef int coords[3]
-        cdef int num_oct_levels, level, i, j
-        cdef int max_level = self.artio_handle.max_level
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
-        cdef np.int64_t *tot_octs_per_level = <np.int64_t *>malloc(
-            (max_level + 1)*sizeof(np.int64_t))
-        self.level_indices = <np.int64_t *>malloc(
-            (max_level + 1)*sizeof(np.int64_t))
-        for level in range(max_level + 1):
-            tot_octs_per_level[level] = 0
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
-        # Now we iterate and create them, level by level.
-        # Note that we are doing a bit of magic to figure out how many root
-        # nodes we will need at most
-        cdef int nmask = self.nn[0] * self.nn[1] * self.nn[2] / 8
-        cdef np.uint8_t *mask = <np.uint8_t *> malloc(
-            self.nn[0] * self.nn[1] * self.nn[2]) # one bit for each one
-        for i in range(nmask): mask[i] = 0
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, NULL, &num_oct_levels, num_octs_per_level )
-            check_artio_status(status) 
-            artio_sfc_coords(handle, sfc, coords)
-            # Now we mask that bit
-            for i in range(3):
-                coords[i] = <int> (coords[i]/2)
-            ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
-            bits = ipos % 8
-            mask[ <int> (ipos/8) ] |= (1 << bits)
-            for level in range(1, num_oct_levels+1):
-                # Now we are simply counting so we can pre-allocate arrays.
-                # Because the grids have all been cached this should be fine.
-                tot_octs_per_level[level] += num_octs_per_level[level-1]
-            status = artio_grid_read_root_cell_end( handle )
+        cdef np.float64_t f64pos[3], dds[3]
+        cdef np.ndarray[np.float64_t, ndim=2] pos
+        # NOTE: We do not cache any SFC ranges here, as we should only ever be
+        # called from within a pre-cached operation in the SFC handler.
+
+        # We only allow one root oct.
+        self.append_domain(oct_count)
+        self.domains[self.num_domains - 1].con_id = sfc
+
+        oct_ind = -1
+        ipos = 0
+        for level in range(num_oct_levels):
+            oct_ind = imax(oct_ind, num_octs_per_level[level])
+            self.level_indices[level] = ipos
+            ipos += num_octs_per_level[level]
+        pos = np.empty((oct_ind, 3), dtype="float64")
+
+        # Now we initialize
+        # Note that we also assume we have already started reading the level.
+        ipos = 0
+        for level in range(num_oct_levels):
+            status = artio_grid_read_level_begin(handle, level + 1)
             check_artio_status(status)
-        cdef np.int64_t num_root = 0
-        for i in range(nmask):
-            for j in range(8):
-                num_root += ((mask[i] >> j) & 1)
-        tot_octs_per_level[0] = num_root
-        cdef np.int64_t tot = 0
-        for i in range(max_level + 1):
-            self.level_indices[i] = tot
-            tot += tot_octs_per_level[i]
-        self.allocate_domains([num_root, tot - num_root], num_root)
-        # Now we have everything counted, and we need to create the appropriate
-        # number of arrays.
-        cdef np.ndarray[np.float64_t, ndim=2] pos
-        pos = np.empty((tot, 3), dtype="float64")
-        # We do a special case for level 0
-        cdef np.float64_t dds[3]
-        for i in range(3):
-            dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
-            status = artio_grid_read_root_cell_begin( handle, sfc, 
-                    dpos, NULL, &num_oct_levels, num_octs_per_level)
-            check_artio_status(status) 
-            artio_sfc_coords(handle, sfc, coords)
-            # Now we check if we have added yet or not
-            for i in range(3):
-                coords[i] = <int> (coords[i]/2)
-            ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
-            bits = ipos % 8
-            if ((mask[<int>(ipos/8)] >> bits) & 1) == 1:
-                # We add it here
+            for oct_ind in range(num_octs_per_level[level]):
+                status = artio_grid_read_oct(handle, dpos, NULL, NULL)
                 for i in range(3):
-                    dpos[i] = self.DLE[i] + (coords[i]+0.5)*dds[i]
-                    pos[self.level_indices[0], i] = dpos[i]
-                mask[<int>(ipos/8)] -= (1 << bits)
-                self.level_indices[0] += 1
-            # Now we iterate over all the children
-            for level in range(1, num_oct_levels+1):
-                status = artio_grid_read_level_begin(handle, level)
-                check_artio_status(status) 
-                for oct_ind in range(num_octs_per_level[level - 1]):
-                    status = artio_grid_read_oct(handle, dpos, NULL, NULL)
-                    check_artio_status(status)
-                    for i in range(3):
-                        pos[self.level_indices[level], i] = dpos[i]
-                    self.level_indices[level] += 1
-                status = artio_grid_read_level_end(handle)
+                    pos[oct_ind, i] = dpos[i]
                 check_artio_status(status)
-            status = artio_grid_read_root_cell_end( handle )
+            status = artio_grid_read_level_end(handle)
             check_artio_status(status)
-        nadded = 0
-        cdef np.int64_t si, ei
-        si = 0
-        # We initialize domain to 1 so that all root mesh octs are viewed as
-        # not belonging to this domain.  This way we don't get confused with
-        # how the different meshes are interfaced, and the root mesh container
-        # will own all the root mesh octs.  Then, for all octs at higher
-        # levels, we use domain == 2.
-        cdef int domain = 1
-        for level in range(max_level + 1):
-            self.level_indices[level] = si
-            ei = si + tot_octs_per_level[level]
-            if tot_octs_per_level[level] == 0: break
-            nadded = self.add(domain, level, pos[si:ei, :])
-            if level > 0 and nadded != (ei - si):
-                print domain, self.sfc_start, self.sfc_end
-                print level, nadded, ei, si, self.max_root,
-                print self.level_indices[level]
-                print pos[si:ei,:]
-                print nadded, (ei - si), tot_octs_per_level[0]
+            nadded = self.add(self.num_domains, level, pos[:num_octs_per_level[level],:])
+            if nadded != num_octs_per_level[level]:
                 raise RuntimeError
-            si = ei
-            domain = 2
-        artio_grid_clear_sfc_cache(handle)
-        free(mask)
-        free(num_octs_per_level)
-        free(tot_octs_per_level)
 
     @cython.boundscheck(False)
     @cython.wraparound(False)
     @cython.cdivision(True)
     def fill_sfc(self, 
-                 np.ndarray[np.uint8_t, ndim=1] levels,
-                 np.ndarray[np.uint8_t, ndim=1] cell_inds,
-                 np.ndarray[np.int64_t, ndim=1] file_inds,
-                 field_indices, dest_fields):
+             np.ndarray[np.uint8_t, ndim=1] levels,
+             np.ndarray[np.uint8_t, ndim=1] cell_inds,
+             np.ndarray[np.int64_t, ndim=1] file_inds,
+             np.ndarray[np.int64_t, ndim=1] domain_counts,
+             field_indices, dest_fields):
         cdef np.ndarray[np.float32_t, ndim=2] source
         cdef np.ndarray[np.float64_t, ndim=1] dest
         cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
-        cdef int j, oct_ind, level
-        cdef np.int64_t sfc
+        cdef int level, j, oct_ind, si
+        cdef np.int64_t sfc, ipos
         cdef np.float64_t val
         cdef artio_fileset_handle *handle = self.artio_handle.handle
         cdef double dpos[3]
@@ -736,55 +761,61 @@
             nf * sizeof(int))
         cdef np.float32_t **field_vals = <np.float32_t**> malloc(
             nf * sizeof(np.float32_t*))
-        cdef np.int64_t *local_ind = <np.int64_t *> malloc(
-            (max_level + 1) * sizeof(np.int64_t))
-        for i in range(max_level + 1):
-            # This will help us keep track of where we are in the flattened
-            # array, which will be indexed by file_ind.
-            local_ind[i] = self.level_indices[i]
         source_arrays = []
+        ipos = -1
+        for i in range(self.num_domains):
+            ipos = imax(ipos, self.domains[i].n)
         for i in range(nf):
             field_ind[i] = field_indices[i]
-            # This zeros should be an empty once we handle the root grid
-            source = np.zeros((self.nocts, 8), dtype="float32")
+            # Note that we subtract one, because we're not using the root mesh.
+            source = np.zeros((ipos, 8), dtype="float32")
             source_arrays.append(source)
             field_vals[i] = <np.float32_t*> source.data
+        cdef np.int64_t level_position[32], lp
         # First we need to walk the mesh in the file.  Then we fill in the dest
         # location based on the file index.
-        status = artio_grid_cache_sfc_range(handle,
-            self.sfc_start, self.sfc_end )
-        check_artio_status(status) 
-        for sfc in range(self.sfc_start, self.sfc_end + 1):
+        # A few ways this could be improved:
+        #   * Create a new visitor function that actually queried the data,
+        #     rather than our somewhat hokey double-loop over SFC arrays.
+        #   * Go to pointers for the dest arrays.
+        #   * Enable preloading during mesh initialization
+        #   * Calculate domain indices on the fly rather than with a
+        #     double-loop to calculate domain_counts
+        cdef np.int64_t offset = 0 
+        for si in range(self.num_domains):
+            sfc = self.domains[si].con_id
             status = artio_grid_read_root_cell_begin( handle, sfc, 
                     dpos, NULL, &num_oct_levels, num_octs_per_level)
             check_artio_status(status) 
-            for level in range(1, num_oct_levels+1):
-                status = artio_grid_read_level_begin(handle, level)
+            lp = 0
+            for level in range(num_oct_levels):
+                status = artio_grid_read_level_begin(handle, level + 1)
                 check_artio_status(status) 
-                for oct_ind in range(num_octs_per_level[level - 1]):
+                level_position[level] = lp
+                for oct_ind in range(num_octs_per_level[level]):
                     status = artio_grid_read_oct(handle, dpos, grid_variables, NULL)
                     check_artio_status(status)
                     for j in range(8):
                         for i in range(nf):
-                            field_vals[i][local_ind[level] * 8 + j] = \
-                                grid_variables[ngv * j + field_ind[i]]
-                    local_ind[level] += 1
+                            field_vals[i][(oct_ind + lp)*8+j] = \
+                                grid_variables[ngv*j+field_ind[i]]
                 status = artio_grid_read_level_end(handle)
                 check_artio_status(status)
+                lp += num_octs_per_level[level]
             status = artio_grid_read_root_cell_end( handle )
             check_artio_status(status)
-        # Now we have all our sources.
-        artio_grid_clear_sfc_cache(handle)
-        for j in range(nf):
-            dest = dest_fields[j]
-            source = source_arrays[j]
-            for i in range(levels.shape[0]):
-                if levels[i] == 0: continue
-                oct_ind = self.level_indices[levels[i]]
-                dest[i] = source[file_inds[i] + oct_ind, cell_inds[i]]
+            # Now we have all our sources.
+            for j in range(nf):
+                dest = dest_fields[j]
+                source = source_arrays[j]
+                for i in range(domain_counts[si]):
+                    level = levels[i + offset]
+                    oct_ind = file_inds[i + offset] + level_position[level]
+                    dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
+            # Now, we offset by the actual number filled here.
+            offset += domain_counts[si]
         free(field_ind)
         free(field_vals)
-        free(local_ind)
         free(grid_variables)
         free(num_octs_per_level)
 
@@ -952,11 +983,11 @@
         status = artio_particle_read_root_cell_end( handle )
         check_artio_status(status)
 
-    status = artio_particle_clear_sfc_cache(handle)
-    check_artio_status(status)
+    #status = artio_particle_clear_sfc_cache(handle)
+    #check_artio_status(status)
 
-    status = artio_grid_clear_sfc_cache(handle)
-    check_artio_status(status)
+    #status = artio_grid_clear_sfc_cache(handle)
+    #check_artio_status(status)
 
     free(num_octs_per_level)
     free(num_particles_per_species)
@@ -976,22 +1007,23 @@
     cdef artio_fileset_handle *handle
     cdef np.uint64_t sfc_start
     cdef np.uint64_t sfc_end
+    cdef public object _last_mask
+    cdef public object _last_selector_id
+    cdef ARTIOSFCRangeHandler range_handler
 
-    def __init__(self, domain_dimensions, # cells
-                 domain_left_edge,
-                 domain_right_edge,
-                 artio_fileset artio_handle,
-                 sfc_start, sfc_end):
-        self.artio_handle = artio_handle
-        self.handle = artio_handle.handle
+    def __init__(self, ARTIOSFCRangeHandler range_handler):
         cdef int i
         for i in range(3):
-            self.dims[i] = domain_dimensions[i]
-            self.DLE[i] = domain_left_edge[i]
-            self.DRE[i] = domain_right_edge[i]
-            self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
-        self.sfc_start = sfc_start
-        self.sfc_end = sfc_end
+            self.DLE[i] = range_handler.DLE[i]
+            self.DRE[i] = range_handler.DRE[i]
+            self.dims[i] = range_handler.dims[i]
+            self.dds[i] = range_handler.dds[i]
+        self.handle = range_handler.handle
+        self.artio_handle = range_handler.artio_handle
+        self._last_mask = self._last_selector_id = None
+        self.sfc_start = range_handler.sfc_start
+        self.sfc_end = range_handler.sfc_end
+        self.range_handler = range_handler
 
     @cython.cdivision(True)
     cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1019,17 +1051,16 @@
         cdef int i
         return self.mask(selector).sum()
 
-    def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="int64")
+        coords = np.empty((num_cells, 3), dtype="int64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1042,18 +1073,17 @@
             filled += 1
         return coords
 
-    def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
-        # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
+        # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
         cdef np.int64_t sfc
         cdef np.float64_t pos[3]
         cdef int acoords[3], i
-        # We call it num_octs, but it's really num_cells.
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] coords
-        coords = np.empty((num_octs, 3), dtype="float64")
+        coords = np.empty((num_cells, 3), dtype="float64")
         cdef int filled = 0
         for sfc in range(self.sfc_start, self.sfc_end + 1):
             if mask[sfc - self.sfc_start] == 0: continue
@@ -1066,27 +1096,30 @@
             filled += 1
         return coords
 
-    def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef int i
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.float64_t, ndim=2] width
-        width = np.zeros((num_octs, 3), dtype="float64")
+        width = np.zeros((num_cells, 3), dtype="float64")
         for i in range(3):
             width[:,i] = self.dds[i]
         return width
 
-    def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+    def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
                 int domain_id = -1):
         cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
         mask = self.mask(selector)
-        num_octs = mask.sum()
+        num_cells = mask.sum()
         cdef np.ndarray[np.int64_t, ndim=1] res
-        res = np.zeros(num_octs, dtype="int64")
+        res = np.zeros(num_cells, dtype="int64")
         return res
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
     def selector_fill(self, SelectorObject selector,
                       np.ndarray source,
                       np.ndarray dest = None,
@@ -1131,42 +1164,30 @@
             return dest
         return filled
 
-    def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
-        cdef int i, status
-        cdef double dpos[3]
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
+    @cython.cdivision(True)
+    def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+        cdef int i
         cdef np.float64_t pos[3]
-        if num_octs == -1:
+        cdef np.int64_t sfc
+        cdef np.ndarray[np.int64_t, ndim=1] oct_count
+        if self._last_selector_id == hash(selector):
+            return self._last_mask
+        if num_cells == -1:
             # We need to count, but this process will only occur one time,
-            # since num_octs will later be cached.
-            num_octs = self.sfc_end - self.sfc_start + 1
-        assert(num_octs == (self.sfc_end - self.sfc_start + 1))
-        cdef np.ndarray[np.uint8_t, ndim=1] mask
-        cdef int num_oct_levels
-        cdef int max_level = self.artio_handle.max_level
-        cdef int *num_octs_per_level = <int *>malloc(
-            (max_level + 1)*sizeof(int))
-        mask = np.zeros((num_octs), dtype="uint8")
-        status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
-                                            self.sfc_end)
-        check_artio_status(status) 
+            # since num_cells will later be cached.
+            num_cells = self.sfc_end - self.sfc_start + 1
+        mask = np.zeros((num_cells), dtype="uint8")
+        oct_count = self.range_handler.oct_count
         for sfc in range(self.sfc_start, self.sfc_end + 1):
-            # We check if the SFC is in our selector, and if so, we copy
-            # Note that because we initialize to zeros, we can just continue if
-            # it's not included.
+            if oct_count[sfc - self.sfc_start] > 0: continue
             self.sfc_to_pos(sfc, pos)
             if selector.select_cell(pos, self.dds) == 0: continue
-            # Now we just need to check if the cells are refined.
-            status = artio_grid_read_root_cell_begin( self.handle,
-                sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
-            check_artio_status(status)
-            status = artio_grid_read_root_cell_end( self.handle )
-            check_artio_status(status)
-            # If refined, we skip
-            if num_oct_levels > 0: continue
             mask[sfc - self.sfc_start] = 1
-        artio_grid_clear_sfc_cache(self.handle)
-        free(num_octs_per_level)
-        return mask.astype("bool")
+        self._last_mask = mask.astype("bool")
+        self._last_selector_id = hash(selector)
+        return self._last_mask
 
     def fill_sfc_particles(self, fields):
         rv = read_sfc_particles(self.artio_handle,
@@ -1223,8 +1244,8 @@
             status = artio_grid_read_root_cell_end( handle )
             check_artio_status(status)
         # Now we have all our sources.
-        status = artio_grid_clear_sfc_cache(handle)
-        check_artio_status(status)
+        #status = artio_grid_clear_sfc_cache(handle)
+        #check_artio_status(status)
         free(field_ind)
         free(field_vals)
         free(grid_variables)

This diff is so big that we needed to truncate the remainder.
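
A pattern worth noting from this changeset: the rewritten mask() method now
memoizes its result, caching the boolean mask alongside hash(selector) so that
repeated queries with the same selector skip the SFC walk entirely.  A minimal,
file-free sketch of that memoization, assuming only that a selector exposes
select_cell() and is hashable (the Selector protocol here is illustrative, not
yt's SelectorObject):

    import numpy as np

    class MaskCache(object):
        """Cache the most recent boolean mask, keyed on the selector's hash."""
        def __init__(self, n_cells):
            self.n_cells = n_cells
            self._last_mask = None
            self._last_selector_id = None

        def mask(self, selector):
            # Reuse the cached mask when the same selector asks again.
            if self._last_selector_id == hash(selector):
                return self._last_mask
            mask = np.zeros(self.n_cells, dtype="uint8")
            for i in range(self.n_cells):
                if selector.select_cell(i):
                    mask[i] = 1
            self._last_mask = mask.astype("bool")
            self._last_selector_id = hash(selector)
            return self._last_mask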

https://bitbucket.org/yt_analysis/yt-3.0/commits/9f2ff231136f/
Changeset:   9f2ff231136f
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-26 18:48:50
Summary:     This should finish support for non-constant RefFactor values for Boxlib data.
Affected #:  1 file

diff -r 30f9b4536566109133f6da6fd9b2ac528b560ca6 -r 9f2ff231136f818b47993acfdeaab1a1018ac21f yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -117,6 +117,20 @@
         coords[:] = self.Level + self.pf.level_offsets[self.Level]
         return coords
 
+    # Override this as well, since refine_by can vary
+    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+        rf = self.pf.ref_factors[self.Level]
+        if dlevel != 1:
+            raise NotImplementedError
+        gi, cgi = self.get_global_startindex(), child.get_global_startindex()
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+                              self.ActiveDimensions)
+        endIndex += (startIndex == endIndex)
+        mask[startIndex[0]:endIndex[0],
+             startIndex[1]:endIndex[1],
+             startIndex[2]:endIndex[2]] = tofill
+
 class BoxlibHierarchy(GridGeometryHandler):
     grid = BoxlibGrid
     def __init__(self, pf, data_style='boxlib_native'):
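
The override is needed because Boxlib refinement factors can differ from level
to level, so the child's global indices must be divided by this level's factor
rather than by a constant refine_by.  A standalone sketch of the index
arithmetic, with made-up grid numbers:

    import numpy as np

    def child_index_range(parent_start, parent_dims, child_start,
                          child_dims, rf):
        """Map a child's global index range into parent cells, dividing by
        the parent level's (possibly non-constant) refinement factor rf."""
        start = np.maximum(0, child_start // rf - parent_start)
        end = np.minimum((child_start + child_dims) // rf - parent_start,
                         parent_dims)
        end += (start == end)  # keep at least one cell flagged
        return start, end

    # An 8^3 parent at the origin with a child refined by rf = 4:
    s, e = child_index_range(np.zeros(3, "int64"), np.array([8, 8, 8]),
                             np.array([8, 8, 8]), np.array([16, 16, 16]), 4)
    # s = [2 2 2], e = [6 6 6]: the child covers parent cells [2:6] per axis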


https://bitbucket.org/yt_analysis/yt-3.0/commits/a7722c2f9bd9/
Changeset:   a7722c2f9bd9
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-27 00:19:51
Summary:     Adding a relative_refinement function for StaticOutput.

This function may be refactored out at some time in the future.
Affected #:  3 files

diff -r 9f2ff231136f818b47993acfdeaab1a1018ac21f -r a7722c2f9bd9e2fdf080a11ae20b5d48403c0e45 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -398,7 +398,8 @@
             center, pf, field_parameters)
         self.left_edge = np.array(left_edge)
         self.level = level
-        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+
+        rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
         rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
         self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
@@ -488,9 +489,11 @@
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
                          for field in fields]
         domain_dims = self.pf.domain_dimensions.astype("int64") \
-                    * self.pf.refine_by**self.level
+                    * self.pf.relative_refinement(0, self.level)
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
+            # NOTE: This usage of "refine_by" is actually *okay*, because it's
+            # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
                         domain_dims, self.pf.refine_by)
@@ -647,10 +650,12 @@
         ls = self._initialize_level_state(fields)
         for level in range(self.level + 1):
             domain_dims = self.pf.domain_dimensions.astype("int64") \
-                        * self.pf.refine_by**level
+                        * self.pf.relative_refinement(0, self.level)
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
+                # NOTE: This usage of "refine_by" is actually *okay*, because it's
+                # being used with respect to iref, which is *already* scaled!
                 fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
                             chunk.ires, domain_dims, self.pf.refine_by)
@@ -682,14 +687,16 @@
     def _update_level_state(self, level_state):
         ls = level_state
         if ls.current_level >= self.level: return
+        rf = float(self.pf.relative_refinement(
+                    ls.current_level, ls.current_level + 1))
         ls.current_level += 1
-        ls.current_dx = self._base_dx / self.pf.refine_by**ls.current_level
+        ls.current_dx = self._base_dx / \
+            self.pf.relative_refinement(0, ls.current_level)
         self._setup_data_source(ls)
         LL = self.left_edge - self.pf.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex = np.rint(LL / ls.current_dx).astype('int64') - 1
         ls.domain_iwidth = np.rint(self.pf.domain_width/ls.current_dx).astype('int64') 
-        rf = float(self.pf.refine_by)
         input_left = (level_state.old_global_startindex + 0.5) * rf 
         width = (self.ActiveDimensions*self.dds)
         output_dims = np.rint(width/level_state.current_dx+0.5).astype("int32") + 2

diff -r 9f2ff231136f818b47993acfdeaab1a1018ac21f -r a7722c2f9bd9e2fdf080a11ae20b5d48403c0e45 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -321,6 +321,9 @@
             raise RuntimeError
         return int(o2)
 
+    def relative_refinement(self, l0, l1):
+        return self.refine_by**(l1-l0)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)

diff -r 9f2ff231136f818b47993acfdeaab1a1018ac21f -r a7722c2f9bd9e2fdf080a11ae20b5d48403c0e45 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -575,6 +575,10 @@
             v = getattr(self, a)
             mylog.info("Parameters: %-25s = %s", a, v)
 
+    def relative_refinement(self, l0, l1):
+        offset = self.level_offsets[l1] - self.level_offsets[l0]
+        return self.refine_by**(l1-l0 + offset)
+
 class OrionHierarchy(BoxlibHierarchy):
     
     def __init__(self, pf, data_style='orion_native'):
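
For reference, the base implementation reduces to refine_by**(l1 - l0), while
the Boxlib override folds the per-level offsets into the exponent.  A toy
sketch with hypothetical level_offsets showing where the two diverge:

    class BaseOutput(object):
        refine_by = 2
        def relative_refinement(self, l0, l1):
            return self.refine_by ** (l1 - l0)

    class BoxlibOutput(BaseOutput):
        # Hypothetical: level 2 was refined by an extra factor of 2.
        level_offsets = [0, 0, 1]
        def relative_refinement(self, l0, l1):
            offset = self.level_offsets[l1] - self.level_offsets[l0]
            return self.refine_by ** (l1 - l0 + offset)

    print(BaseOutput().relative_refinement(0, 2))    # 4
    print(BoxlibOutput().relative_refinement(0, 2))  # 8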


https://bitbucket.org/yt_analysis/yt-3.0/commits/cd0e601e78d1/
Changeset:   cd0e601e78d1
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-09-28 00:34:30
Summary:     field_order records the order in which fields will be found in the
file; field_indexes is a dictionary, which is inherently unordered.
Affected #:  2 files

diff -r a7722c2f9bd9e2fdf080a11ae20b5d48403c0e45 -r cd0e601e78d1a7815cb01b27442fe42e283ce206 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -312,6 +312,9 @@
         self.field_list = self.parameter_file._field_list[:]
         self.field_indexes = dict((f, i)
                                 for i, f in enumerate(self.field_list))
+        # There are times when field_list may change.  We copy it here to
+        # avoid that possibility.
+        self.field_order = [f for f in self.field_list]
 
     def _setup_classes(self):
         dd = self._get_data_reader_dict()

diff -r a7722c2f9bd9e2fdf080a11ae20b5d48403c0e45 -r cd0e601e78d1a7815cb01b27442fe42e283ce206 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -61,7 +61,6 @@
             grids_by_file[g.filename].append(g)
         dtype = self.pf.hierarchy._dtype
         bpr = dtype.itemsize
-        field_indices = self.pf.hierarchy.field_indexes
         field_list = set(f[1] for f in fields)
         for filename in grids_by_file:
             grids = grids_by_file[filename]
@@ -72,7 +71,7 @@
                 grid._seek(f)
                 count = grid.ActiveDimensions.prod()
                 size = count * bpr
-                for field in field_indices:
+                for field in self.pf.hierarchy.field_order:
                     if field in field_list:
                         # We read it ...
                         v = np.fromfile(f, dtype=dtype, count=count)
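
The practical consequence: reads must walk the fields in their on-disk order
and seek past any field that was not requested, since iterating the
field_indexes dict could visit them in any order.  A file-free sketch of the
pattern (all names here are illustrative):

    import numpy as np

    def read_selected(buf, field_order, wanted, count, dtype=np.float64):
        """Walk fields in on-disk order, keeping only the wanted ones."""
        out, offset = {}, 0
        itemsize = np.dtype(dtype).itemsize
        for field in field_order:       # on-disk order, never dict order
            if field in wanted:
                out[field] = np.frombuffer(buf, dtype=dtype, count=count,
                                           offset=offset)
            offset += count * itemsize  # seek past the field either way
        return out

    raw = np.arange(12, dtype=np.float64).tobytes()  # 3 fields x 4 cells
    vals = read_selected(raw, ["density", "xmom", "ymom"], {"ymom"}, 4)
    print(vals["ymom"])  # [  8.   9.  10.  11.]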


https://bitbucket.org/yt_analysis/yt-3.0/commits/3034def0dbfa/
Changeset:   3034def0dbfa
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-02 15:29:47
Summary:     Moving Orion tests to Boxlib
Affected #:  4 files

diff -r cd0e601e78d1a7815cb01b27442fe42e283ce206 -r 3034def0dbfa0a3d5f254d64a93537590dd21476 yt/frontends/boxlib/tests/test_orion.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -0,0 +1,40 @@
+"""
+Orion frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.orion.api import OrionStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+radadvect = "RadAdvect/plt00000"
+@requires_pf(radadvect)
+def test_radadvect():
+    pf = data_dir_load(radadvect)
+    yield assert_equal, str(pf), "plt00000"
+    for test in small_patch_amr(radadvect, _fields):
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_pf(rt)
+def test_radtube():
+    pf = data_dir_load(rt)
+    yield assert_equal, str(pf), "plt00500"
+    for test in small_patch_amr(rt, _fields):
+        yield test

diff -r cd0e601e78d1a7815cb01b27442fe42e283ce206 -r 3034def0dbfa0a3d5f254d64a93537590dd21476 yt/frontends/orion/tests/test_outputs.py
--- a/yt/frontends/orion/tests/test_outputs.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Orion frontend tests
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.testing import *
-from yt.utilities.answer_testing.framework import \
-    requires_pf, \
-    small_patch_amr, \
-    big_patch_amr, \
-    data_dir_load
-from yt.frontends.orion.api import OrionStaticOutput
-
-_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
-
-radadvect = "RadAdvect/plt00000"
-@requires_pf(radadvect)
-def test_radadvect():
-    pf = data_dir_load(radadvect)
-    yield assert_equal, str(pf), "plt00000"
-    for test in small_patch_amr(radadvect, _fields):
-        yield test
-
-rt = "RadTube/plt00500"
-@requires_pf(rt)
-def test_radtube():
-    pf = data_dir_load(rt)
-    yield assert_equal, str(pf), "plt00500"
-    for test in small_patch_amr(rt, _fields):
-        yield test


https://bitbucket.org/yt_analysis/yt-3.0/commits/d46d5add1b8c/
Changeset:   d46d5add1b8c
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-07 15:17:41
Summary:     Importing boxlib in place of the individual frontends.
Affected #:  2 files

diff -r 3034def0dbfa0a3d5f254d64a93537590dd21476 -r d46d5add1b8c4d04a7eee6416f049a76baf0ae38 yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -14,12 +14,17 @@
 #-----------------------------------------------------------------------------
 
 from .data_structures import \
-      OrionGrid, \
+      BoxlibGrid, \
+      BoxlibHierarchy, \
+      BoxlibStaticOutput, \
       OrionHierarchy, \
-      OrionStaticOutput
+      OrionStaticOutput, \
+      CastroStaticOutput, \
+      MaestroStaticOutput
 
 from .fields import \
       OrionFieldInfo, \
+      KnownOrionFields, \
       add_orion_field
 
 from .io import \

diff -r 3034def0dbfa0a3d5f254d64a93537590dd21476 -r d46d5add1b8c4d04a7eee6416f049a76baf0ae38 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -65,14 +65,21 @@
     EnzoSimulation, EnzoFieldInfo, \
     add_enzo_field, add_enzo_1d_field, add_enzo_2d_field
 
-# from yt.frontends.castro.api import \
-#     CastroStaticOutput, CastroFieldInfo, add_castro_field
+# Boxlib stuff
+from yt.frontends.boxlib.api import \
+    BoxlibStaticOutput
 
-# from yt.frontends.nyx.api import \
-#     NyxStaticOutput, NyxFieldInfo, add_nyx_field
+# Orion stuff
+from yt.frontends.boxlib.api import \
+    OrionStaticOutput, OrionFieldInfo, add_orion_field
 
-# from yt.frontends.orion.api import \
-#     OrionStaticOutput, OrionFieldInfo, add_orion_field
+# Maestro stuff
+from yt.frontends.boxlib.api import \
+    MaestroStaticOutput
+
+# Castro stuff
+from yt.frontends.boxlib.api import \
+    CastroStaticOutput
 
 from yt.frontends.flash.api import \
     FLASHStaticOutput, FLASHFieldInfo, add_flash_field
@@ -108,9 +115,6 @@
 from yt.frontends.pluto.api import \
      PlutoStaticOutput, PlutoFieldInfo, add_pluto_field
 
-#from yt.frontends.maestro.api import \
-#    MaestroStaticOutput, MaestroFieldInfo, add_maestro_field
-
 from yt.frontends.stream.api import \
     StreamStaticOutput, StreamFieldInfo, add_stream_field, \
     StreamHandler, load_uniform_grid, load_amr_grids, \


https://bitbucket.org/yt_analysis/yt-3.0/commits/b07ece8c7a41/
Changeset:   b07ece8c7a41
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 21:15:07
Summary:     Fixing case of _max_level == 0.
Affected #:  1 file

diff -r d46d5add1b8c4d04a7eee6416f049a76baf0ae38 -r b07ece8c7a415e75e3456efa4e42b30069809963 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -507,7 +507,7 @@
                                 header_file.readline().split()])
         if ref_factors.size == 0:
             # We use a default of two, as Nyx doesn't always output this value
-            ref_factors = [2] * self._max_level
+            ref_factors = [2] * (self._max_level + 1)
         # We can't vary refinement factors based on dimension, or whatever else
         # they are varied on.  One curious thing: I found that some Castro 3D
         # data has only two refinement factors, which I don't know how to
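
The off-by-one is easiest to see at the boundary: levels run from 0 through
_max_level inclusive, so even a root-only dataset needs one default factor,
which the old expression failed to supply:

    _max_level = 0                 # root-only dataset
    print([2] * _max_level)        # []  -- old code: no factor at all
    print([2] * (_max_level + 1))  # [2] -- fixed: one factor per level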


https://bitbucket.org/yt_analysis/yt-3.0/commits/92361a27de52/
Changeset:   92361a27de52
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 21:27:21
Summary:     Adding Nyx here; particle IO will be moved in shortly.
Affected #:  2 files

diff -r b07ece8c7a415e75e3456efa4e42b30069809963 -r 92361a27de52dbb5e685dec9198b3791749342de yt/frontends/boxlib/api.py
--- a/yt/frontends/boxlib/api.py
+++ b/yt/frontends/boxlib/api.py
@@ -28,4 +28,4 @@
       add_orion_field
 
 from .io import \
-      IOHandlerNative
+      IOHandlerBoxlib

diff -r b07ece8c7a415e75e3456efa4e42b30069809963 -r 92361a27de52dbb5e685dec9198b3791749342de yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -724,4 +724,25 @@
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+class NyxStaticOutput(BoxlibStaticOutput):
 
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        pname = args[0].rstrip("/")
+        dn = os.path.dirname(pname)
+        if len(args) > 1:
+            kwargs['paramFilename'] = args[1]
+
+        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
+
+        # @todo: new Nyx output.
+        # We check for the job_info file's existence because this is currently
+        # what distinguishes Nyx data from MAESTRO data.
+        pfn = os.path.join(pfname)
+        if not os.path.exists(pfn): return False
+        nyx = any(("nyx." in line for line in open(pfn)))
+        maestro = os.path.exists(os.path.join(pname, "job_info"))
+        orion = (not nyx) and (not maestro)
+        return nyx
+
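
The detection boils down to two probes: "nyx." parameters in the inputs file
mark Nyx, and a job_info file marks MAESTRO; anything else Boxlib-shaped falls
through to Orion.  A standalone sketch of the same checks (the function name
and paths are illustrative):

    import os

    def detect_flavor(plotfile_dir, inputs_path):
        """Rough Boxlib-flavor classification mirroring the checks above."""
        if not os.path.exists(inputs_path):
            return None
        nyx = any("nyx." in line for line in open(inputs_path))
        maestro = os.path.exists(os.path.join(plotfile_dir, "job_info"))
        if nyx:
            return "nyx"
        return "maestro" if maestro else "orion"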


https://bitbucket.org/yt_analysis/yt-3.0/commits/0975eb84d3dd/
Changeset:   0975eb84d3dd
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 21:58:00
Summary:     Port Nyx to new Boxlib infrastructure.
Affected #:  2 files

diff -r 92361a27de52dbb5e685dec9198b3791749342de -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -16,6 +16,7 @@
 import os
 import re
 import weakref
+import itertools
 
 from collections import defaultdict
 from string import strip, rstrip
@@ -724,8 +725,51 @@
         if any(line.startswith("Castro   ") for line in lines): return True
         return False
 
+
+class NyxHierarchy(BoxlibHierarchy):
+
+    def __init__(self, pf, data_style='nyx_native'):
+        super(NyxHierarchy, self).__init__(pf, data_style)
+        self._read_particle_header()
+
+    def _read_particle_header(self):
+        if not self.pf.parameters["particles.write_in_plotfile"]:
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
+            return
+        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
+                  ['particle_mass'] +  \
+                  ['particle_velocity_%s' % ax for ax in 'xyz']:
+            self.field_list.append(("io", fn))
+        header = open(os.path.join(self.pf.output_dir, "DM", "Header"))
+        version = header.readline()
+        ndim = header.readline()
+        nfields = header.readline()
+        ntotalpart = int(header.readline())
+        dummy = header.readline() # nextid
+        maxlevel = int(header.readline()) # max level
+
+        # Skip over how many grids are on each level; this info is redundant.
+        for i in range(maxlevel + 1): dummy = header.readline()
+
+        grid_info = np.fromiter((int(i) for line in header.readlines()
+                                 for i in line.split()),
+                                dtype='int64',
+                                count=3*self.num_grids).reshape((self.num_grids, 3))
+        # we need grid_info in `populate_grid_objects`, so save it to self
+
+        for g, pg in itertools.izip(self.grids, grid_info):
+            g.particle_filename = os.path.join(self.pf.output_dir, "DM",
+                                               "Level_%s" % (g.Level),
+                                               "DATA_%04i" % pg[0])
+            g.NumberOfParticles = pg[1]
+            g._particle_offset = pg[2]
+
+        self.grid_particle_count[:, 0] = grid_info[:, 1]
+
 class NyxStaticOutput(BoxlibStaticOutput):
 
+    _hierarchy_class = NyxHierarchy
+
     @classmethod
     def _is_valid(cls, *args, **kwargs):
         # fill our args
@@ -746,3 +790,49 @@
         orion = (not nyx) and (not maestro)
         return nyx
 
+    def _parse_parameter_file(self):
+        super(NyxStaticOutput, self)._parse_parameter_file()
+        #return
+        # Nyx is always cosmological.
+        self.cosmological_simulation = 1
+        self.omega_lambda = self.parameters["comoving_OmL"]
+        self.omega_matter = self.parameters["comoving_OmM"]
+        self.hubble_constant = self.parameters["comoving_h"]
+
+        # Read in the `comoving_a` file and parse the value. We should fix this
+        # in the new Nyx output format...
+        a_file = open(os.path.join(self.output_dir, "comoving_a"))
+        a_string = a_file.readline().strip()
+        a_file.close()
+
+        # Set the scale factor and redshift
+        self.cosmological_scale_factor = float(a_string)
+        self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
+
+        # alias
+        self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        if self.parameters["particles.write_in_plotfile"]:
+            self.particle_types = ("io",)
+            self.particle_types_raw = self.particle_types
+
+    def _set_units(self):
+        super(NyxStaticOutput, self)._set_units()
+        # Masses are always in $ M_{\odot} $
+        self.units["particle_mass"] = 1.989e33
+
+        mylog.warning("Length units: setting 1.0 = 1.0 Mpc.")
+        self.units.update(mpc_conversion)
+        self.units["density"] = self.units["particle_mass"]/(self.units["cm"])**3
+        self.units["particle_mass_density"] = self.units["density"]
+
+        mylog.warning("Time units: setting 1.0 = Mpc/km s ~ 10^12 yr .")
+        self.time_units["s"] = 1.0 / 3.08568025e19
+        self.conversion_factors["Time"] = 1.0 / 3.08568025e19
+
+        cf = 1e5 * (self.cosmological_scale_factor)
+        for ax in "xyz":
+            self.units["particle_velocity_%s" % ax] = cf
+
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.time_units["s"] / sec_conversion[unit]
+

diff -r 92361a27de52dbb5e685dec9198b3791749342de -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 yt/frontends/boxlib/io.py
--- a/yt/frontends/boxlib/io.py
+++ b/yt/frontends/boxlib/io.py
@@ -175,3 +175,21 @@
             tr)
         return tr
 
+nyx_particle_field_names = ['particle_position_%s' % ax for ax in 'xyz'] + \
+                           ['particle_mass'] +  \
+                           ['particle_velocity_%s' % ax for ax in 'xyz']
+
+class IOHandlerNyx(IOHandlerBoxlib):
+    _data_style = "nyx_native"
+
+    def _read_particle_coords(self, chunks, ptf):
+        offset = grid._particle_offset
+        filen = os.path.expanduser(grid.particle_filename)
+        off = grid._particle_offset
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+                            nyx_particle_field_names.index(field),
+                            len(nyx_particle_field_names), tr)
+
+    def _read_particle_fields(self, chunks, ptf, fields):
+        pass
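
Since Nyx is always cosmological, the scale factor is read from the one-line
comoving_a file and the redshift then follows as z = 1/a - 1.  A minimal
sketch of that parse (only the file name comes from the diff above):

    def read_comoving_a(path):
        """Parse the one-line comoving_a file; return (a, z)."""
        with open(path) as f:
            a = float(f.readline().strip())
        return a, 1.0 / a - 1.0

    # A file containing "0.25" yields a = 0.25, z = 3.0.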


https://bitbucket.org/yt_analysis/yt-3.0/commits/dce03e91a6a0/
Changeset:   dce03e91a6a0
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 21:58:44
Summary:     Removing old, now-obsolete frontends.
Affected #:  32 files

diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/__init__.py
--- a/yt/frontends/castro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CastroGrid, \
-      CastroHierarchy, \
-      CastroStaticOutput
-
-from .fields import \
-      CastroFieldInfo, \
-      add_castro_field
-
-from .io import \
-      IOHandlerNative

diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""
-Data structures for Castro.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import re
-import os
-import weakref
-import itertools
-from collections import defaultdict
-from string import strip, rstrip
-from stat import ST_CTIME
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
-from yt.data_objects.grid_patch import AMRGridPatch
-from yt.geometry.grid_geometry_handler import GridGeometryHandler
-from yt.data_objects.static_output import StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.lib import get_box_grids_level
-
-from .definitions import \
-    castro2enzoDict, \
-    parameterDict, \
-    yt2castroFieldsDict, \
-    castro_FAB_header_pattern, \
-    castro_particle_field_names, \
-    boxlib_bool_to_int
-from .fields import \
-    CastroFieldInfo, \
-    KnownCastroFields, \
-    add_castro_field
-
-
-class CastroGrid(AMRGridPatch):
-    _id_offset = 0
-
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
-                 dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(index, **kwargs)
-        self.filename = filename
-        self._offset = offset
-        self._paranoid = paranoia  # TODO: Factor this behavior out in tests
-
-        ### TODO: error check this (test)
-        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
-        self.LeftEdge  = LeftEdge.copy()
-        self.RightEdge = RightEdge.copy()
-        self.index = index
-        self.Level = level
-
-    def get_global_startindex(self):
-        return self.start_index
-
-    def _prepare_grid(self):
-        """ Copies all the appropriate attributes from the hierarchy. """
-        # This is definitely the slowest part of generating the hierarchy.
-        # Now we give it pointers to all of its attributes
-        # Note that to keep in line with Enzo, we have broken PEP-8
-
-        h = self.hierarchy # cache it
-        #self.StartIndices = h.gridStartIndices[self.id]
-        #self.EndIndices = h.gridEndIndices[self.id]
-        h.grid_levels[self.id,0] = self.Level
-        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
-        h.grid_right_edge[self.id,:] = self.RightEdge[:]
-        #self.Time = h.gridTimes[self.id,0]
-        #self.NumberOfParticles = h.gridNumberOfParticles[self.id,0]
-        self.field_indexes = h.field_indexes
-        self.Children = h.gridTree[self.id]
-        pIDs = h.gridReverseTree[self.id]
-
-        if len(pIDs) > 0:
-            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
-        else:
-            self.Parent = None
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        dx = self.hierarchy.grid_dxs[self.index][0]
-        dy = self.hierarchy.grid_dys[self.index][0]
-        dz = self.hierarchy.grid_dzs[self.index][0]
-        self.dds = np.array([dx, dy, dz])
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    def __repr__(self):
-        return "CastroGrid_%04i" % (self.id)
-
-class CastroHierarchy(GridGeometryHandler):
-    grid = CastroGrid
-
-    def __init__(self, pf, data_style='castro_native'):
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        header_filename = os.path.join(pf.fullplotdir, 'Header')
-        self.directory = pf.fullpath
-        self.data_style = data_style
-
-        # This also sets up the grid objects
-        self.read_global_header(header_filename,
-                                self.parameter_file.paranoid_read) 
-        self.read_particle_header()
-        self._cache_endianness(self.levels[-1].grids[-1])
-
-        super(CastroHierarchy, self).__init__(pf, data_style)
-        self._setup_data_io()
-        self._setup_field_list()
-        self._populate_hierarchy()
-
-    def read_global_header(self, filename, paranoid_read):
-        """ Read the global header file for an Castro plotfile output. """
-        counter = 0
-        header_file = open(filename, 'r')
-        self._global_header_lines = header_file.readlines()
-
-        # parse the file
-        self.castro_version = self._global_header_lines[0].rstrip()
-        self.n_fields = int(self._global_header_lines[1])
-
-        counter = self.n_fields + 2
-        self.field_list = []
-        for i, line in enumerate(self._global_header_lines[2:counter]):
-            self.field_list.append(line.rstrip())
-
-        # this is unused...eliminate it?
-        #for f in self.field_indexes:
-        #    self.field_list.append(castro2ytFieldsDict.get(f, f))
-
-        self.dimension = int(self._global_header_lines[counter])
-        if self.dimension != 3:
-            raise RunTimeError("Castro must be in 3D to use yt.")
-
-        counter += 1
-        self.Time = float(self._global_header_lines[counter])
-        counter += 1
-        self.finest_grid_level = int(self._global_header_lines[counter])
-        self.n_levels = self.finest_grid_level + 1
-        counter += 1
-
-        # quantities with _unnecessary are also stored in the inputs
-        # file and are not needed.  they are read in and stored in
-        # case in the future we want to enable a "backwards" way of
-        # taking the data out of the Header file and using it to fill
-        # in for a missing inputs file
-        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #np.array(map(int, self._global_header_lines[counter].split()))
-        counter += 1
-        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
-        #domain_re.search(self._global_header_lines[counter]).groups()
-        counter += 1
-        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
-        counter += 1
-
-        self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = np.array(map(float, line.split()))
-        counter += self.n_levels
-        self.geometry = int(self._global_header_lines[counter])
-        if self.geometry != 0:
-            raise RunTimeError("yt only supports cartesian coordinates.")
-        counter += 1
-
-        # this is just to debug. eventually it should go away.
-        linebreak = int(self._global_header_lines[counter])
-        if linebreak != 0:
-            raise RunTimeError("INTERNAL ERROR! Header is unexpected size")
-        counter += 1
-
-        # Each level is one group with ngrids on it. each grid has 3 lines of 2 reals
-        # BoxLib madness
-        self.levels = []
-        grid_counter = 0
-        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
-        re_file_finder = re.compile(file_finder_pattern)
-        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
-        re_dim_finder = re.compile(dim_finder_pattern)
-        data_files_pattern = r"Level_[\d]/"
-        data_files_finder = re.compile(data_files_pattern)
-
-        for level in range(0, self.n_levels):
-            tmp = self._global_header_lines[counter].split()
-            # Should this be grid_time or level_time??
-            lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
-            counter += 1
-            nsteps = int(self._global_header_lines[counter])
-            counter += 1
-            self.levels.append(CastroLevel(lev, ngrids))
-            # Open level header, extract file names and offsets for each grid.
-            # Read slightly out of order here: at the end of the lo, hi pairs
-            # for x, y, z is a *list* of file types in the Level directory.
-            # Each type has a Header and a number of data files
-            # (one per processor)
-            tmp_offset = counter + 3*ngrids
-            nfiles = 0
-            key_off = 0
-            files =   {} # dict(map(lambda a: (a,[]), self.field_list))
-            offsets = {} # dict(map(lambda a: (a,[]), self.field_list))
-
-            while (nfiles + tmp_offset < len(self._global_header_lines) and
-                   data_files_finder.match(self._global_header_lines[nfiles+tmp_offset])):
-                filen = os.path.join(self.parameter_file.fullplotdir,
-                                     self._global_header_lines[nfiles+tmp_offset].strip())
-                # open each "_H" header file, and get the number of
-                # components within it
-                level_header_file = open(filen+'_H','r').read()
-                start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
-                grid_file_offset = re_file_finder.findall(level_header_file)
-                ncomp_this_file = int(level_header_file.split('\n')[2])
-
-                for i in range(ncomp_this_file):
-                    key = self.field_list[i+key_off]
-                    f, o = zip(*grid_file_offset)
-                    files[key] = f
-                    offsets[key] = o
-                    self.field_indexes[key] = i
-
-                key_off += ncomp_this_file
-                nfiles += 1
-
-            # convert dict of lists to list of dicts
-            fn = []
-            off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,
-                                     'Level_%i' % level)
-            for i in range(ngrids):
-                fi = [os.path.join(lead_path, files[key][i]) for key in self.field_list]
-                of = [int(offsets[key][i]) for key in self.field_list]
-                fn.append(dict(zip(self.field_list, fi)))
-                off.append(dict(zip(self.field_list, of)))
-
-            for grid in range(0, ngrids):
-                gfn = fn[grid]  # filename of file containing this grid
-                gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                ylo, yhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                zlo, zhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                lo = np.array([xlo, ylo, zlo])
-                hi = np.array([xhi, yhi, zhi])
-                dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
-                                                       level, gfn, gfo, dims,
-                                                       start, stop,
-                                                       paranoia=paranoid_read,  ### TODO: unify the "paranoia"/"paranoid" naming
-                                                       hierarchy=self))
-                grid_counter += 1   # this is global, and shouldn't be reset
-                                    # for each level
-
-            # already read the filenames above...
-            counter += nfiles
-            self.num_grids = grid_counter
-            self.float_type = 'float64'
-
-        self.maxLevel = self.n_levels - 1
-        self.max_level = self.n_levels - 1
-        header_file.close()
-
-    def read_particle_header(self):
-        # We need to get particle offsets and particle counts
-        if not self.parameter_file.use_particles:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-
-        self.field_list += castro_particle_field_names[:]
-        header = open(os.path.join(self.parameter_file.fullplotdir, "DM",
-                                   "Header"))
-        version = header.readline()
-        ndim = header.readline()
-        nfields = header.readline()
-        ntotalpart = int(header.readline())
-        dummy = header.readline() # nextid
-        maxlevel = int(header.readline()) # max level
-
-        # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = np.fromiter((int(i)
-                                 for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        self.pgrid_info = grid_info
-
-    def _cache_endianness(self, test_grid):
-        """
-        Cache the endianness and bytes per real of the grids by using a test grid
-        and assuming that all grids have the same endianness. This is a pretty
-        safe assumption since Castro uses one file per processor, and if you're
-        running on a cluster with different endian processors, then you're on
-        your own!
-
-        """
-        # open the test file and grab the header
-        in_file = open(os.path.expanduser(test_grid.filename[self.field_list[0]]), 'rb')
-        header = in_file.readline()
-        in_file.close()
-        header.strip()
-        # Parse it. The pattern is in castro.definitions.py
-        header_re = re.compile(castro_FAB_header_pattern)
-        bytes_per_real, endian, start, stop, centerType, n_components = header_re.search(header).groups()
-        self._bytes_per_real = int(bytes_per_real)
-        if self._bytes_per_real == int(endian[0]):
-            dtype = '<'
-        elif self._bytes_per_real == int(endian[-1]):
-            dtype = '>'
-        else:
-            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-        dtype += ('f%i' % self._bytes_per_real) # always a floating point
-        self._dtype = dtype
-
-    def _calculate_grid_dimensions(self, start_stop):
-        start = np.array(map(int, start_stop[0].split(',')))
-        stop = np.array(map(int, start_stop[1].split(',')))
-        dimension = stop - start + 1
-        return dimension, start, stop
-
-    def _populate_grid_objects(self):
-        mylog.debug("Creating grid objects")
-
-        self.grids = np.concatenate([level.grids for level in self.levels])
-        basedir = self.parameter_file.fullplotdir
-
-        for g, pg in itertools.izip(self.grids, self.pgrid_info):
-            g.particle_filename = os.path.join(
-                basedir, "DM", "Level_%s" % (g.Level), "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:,0] = self.pgrid_info[:,1]
-        del self.pgrid_info
-
-        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
-        self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
-                                  for level in self.levels], axis=0)
-
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
-
-        left_edges = []
-        right_edges = []
-        dims = []
-        for level in self.levels:
-            left_edges += [g.LeftEdge for g in level.grids]
-            right_edges += [g.RightEdge for g in level.grids]
-            dims += [g.ActiveDimensions for g in level.grids]
-
-        self.grid_left_edge = np.array(left_edges)
-        self.grid_right_edge = np.array(right_edges)
-        self.grid_dimensions = np.array(dims)
-        self.gridReverseTree = [] * self.num_grids
-        self.gridReverseTree = [ [] for i in range(self.num_grids)]
-        self.gridTree = [ [] for i in range(self.num_grids)]
-
-        mylog.debug("Done creating grid objects")
-
-    def _populate_hierarchy(self):
-        self._setup_grid_tree()
-        #self._setup_grid_corners()
-
-        for i, grid in enumerate(self.grids):
-            if (i % 1e4) == 0:
-                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
-
-            grid._prepare_grid()
-            grid._setup_dx()
-
-    def _setup_grid_tree(self):
-        mask = np.empty(self.grids.size, dtype='int32')
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            children = self.grids[mask.astype("bool")]
-            #assert(len(children) == len(self._get_grid_children(grid)))
-            for child in children:
-                self.gridReverseTree[child.id].append(i)
-                self.gridTree[i].append(weakref.proxy(child))
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        dd["field_indexes"] = self.field_indexes
-        GridGeometryHandler._setup_classes(self, dd)
-        #self._add_object_class('grid', "CastroGrid", CastroGridBase, dd)
-        self.object_types.sort()
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return self.grids[mask]
-
-    def _setup_field_list(self):
-        if self.parameter_file.use_particles:
-            # We know which particle fields will exist -- pending further
-            # changes in the future.
-            for field in castro_particle_field_names:
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-                # Note that we call add_castro_field on the field_info directly.  This
-                # will allow the same field detection mechanism to work for 1D, 2D
-                # and 3D fields.
-                self.pf.field_info.add_castro_field(
-                        field, lambda a, b: None,
-                        convert_function=cf, take_log=False,
-                        particle_type=True)
-
-    ### TODO: check if this can be removed completely
-    def _count_grids(self):
-        """
-        this is already provided in ???
-
-        """
-        pass
-
-    def _initialize_grid_arrays(self):
-        mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
-
-    def _parse_hierarchy(self):
-        pass
-
-    def _detect_fields(self):
-        pass
-
-    def _setup_derived_fields(self):
-        pass
-
-    def _initialize_state_variables(self):
-        """override to not re-initialize num_grids in GridGeometryHandler.__init__
-
-        """
-        self._parallel_locking = False
-        self._data_file = None
-        self._data_mode = None
-        self._max_locations = {}
-
-class CastroLevel:
-    def __init__(self, level, ngrids):
-        self.level = level
-        self.ngrids = ngrids
-        self.grids = []
-
-class CastroStaticOutput(StaticOutput):
-    """
-    This is a stripped-down class that simply reads and parses *filename*,
-    without looking at the Castro hierarchy.
-
-    """
-    _hierarchy_class = CastroHierarchy
-    _fieldinfo_fallback = CastroFieldInfo
-    _fieldinfo_known = KnownCastroFields
-
-    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
-                 data_style='castro_native', paranoia=False,
-                 storage_filename = None):
-        """
-        Need to override for Castro file structure.
-
-        The parameter file is usually called "inputs", and there may be a
-        Fortran inputs file, usually called "probin".  plotname here will be
-        a directory name.  As per BoxLib, data_style will be one of:
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
-        """
-        self.storage_filename = storage_filename
-        self.paranoid_read = paranoia
-        self.parameter_filename = paramFilename
-        self.fparameter_filename = fparamFilename
-        self.__ipfn = paramFilename
-        self.fparameters = {}
-        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
-                                                 data_style='castro_native')
-
-
-        # These should maybe not be hardcoded?
-        ### TODO: this.
-        self.parameters["HydroMethod"] = 'castro' # always PPM DE
-        self.parameters["Time"] = 1.0 # default unit is 1...
-        self.parameters["DualEnergyFormalism"] = 0 # always off.
-        self.parameters["EOSType"] = -1 # default
-
-        if self.fparameters.has_key("mu"):
-            self.parameters["mu"] = self.fparameters["mu"]
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        pname = args[0].rstrip("/")
-        dn = os.path.dirname(pname)
-        if len(args) > 1:
-            kwargs['paramFilename'] = args[1]
-
-        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
-
-        # We check for the job_info file's existence because this is currently
-        # what distinguishes Castro data from MAESTRO data.
-        ### ^ that is nuts
-        pfn = os.path.join(pfname)
-        if not os.path.exists(pfn):
-            return False
-        castro = any(("castro." in line for line in open(pfn)))
-        nyx = any(("nyx." in line for line in open(pfn)))
-        castro = castro and (not nyx) # it's only castro if it's not nyx
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not castro) and (not maestro)
-        return castro
-
-    def _parse_parameter_file(self):
-        """
-        Parses the parameter file and establishes the various dictionaries.
-
-        """
-        # Boxlib madness
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
-        self._parse_header_file()
-        self.parameter_filename = self._localize(self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(self.fparameter_filename, 'probin')
-        if os.path.isfile(self.fparameter_filename):
-            self._parse_fparameter_file()
-            for param in self.fparameters:
-                if castro2enzoDict.has_key(param):
-                    self.parameters[castro2enzoDict[param]] = self.fparameters[param]
-
-        # Let's read the file
-        self.unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.parameter_filename).readlines()
-        self.use_particles = False
-
-        for line in lines:
-            if line.find("#") >= 1: # Keep the commented lines...
-                line = line[:line.find("#")]
-            line = line.strip().rstrip()
-            if len(line) < 2 or line.find("#") == 0: # ...but skip comments
-                continue
-
-            try:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-                continue  # param and vals are undefined here; skip this line
-
-            if castro2enzoDict.has_key(param):
-                paramName = castro2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
-            elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("particles.write_in_plotfile"):
-                self.use_particles = boxlib_bool_to_int(vals)
-            self.fparameters[param] = vals
-
-        self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
-        self.dimensionality = self.parameters["TopGridRank"]
-        self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
-        self.refine_by = self.parameters.get("RefineBy", 2)
-
-        if (self.parameters.has_key("ComovingCoordinates") and
-            bool(self.parameters["ComovingCoordinates"])):
-            self.cosmological_simulation = 1
-            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
-            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
-            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
-
-            # Stupid that we have to read a separate file for this :/
-            a_file = open(os.path.join(self.fullplotdir, "comoving_a"))
-            line = a_file.readline().strip()
-            a_file.close()
-
-            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
-            self.cosmological_scale_factor = float(line)
-            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
-        else:
-            ### TODO: make these defaults automatic
-            self.current_redshift = self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
-
-    def _parse_fparameter_file(self):
-        """
-        Parses the Fortran parameter file for Castro. Most of this will be
-        useless, but this is where it keeps mu, the mean mass per particle
-        in units of m_hydrogen.
-
-        """
-        lines = open(self.fparameter_filename).readlines()
-        for line in lines:
-            if line.count("=") == 1:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0 and vals.count("\"") == 0:
-                    try:
-                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
-                    except ValueError:
-                        print "Failed on line", line
-                else:
-                    t = vals.split()
-                if len(t) == 1:
-                    self.fparameters[param] = t[0]
-                else:
-                    self.fparameters[param] = t
-
-    def _parse_header_file(self):
-        """
-        Parses the BoxLib header file to get any parameters stored there.
-        Hierarchy information is read out of this file in CastroHierarchy. 
-
-        Currently, only Time is read here.
-
-        """
-        header_file = open(os.path.join(self.fullplotdir, "Header"))
-        lines = header_file.readlines()
-        header_file.close()
-        n_fields = int(lines[1])
-        self.current_time = float(lines[3 + n_fields])
-
-    def _set_units(self):
-        """
-        Generates the conversion factors to various physical units based on
-        the parameter file.
-
-        """
-        self.units = {}
-        self.time_units = {}
-
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-
-        if self.cosmological_simulation:
-            cf = 1e5 * self.cosmological_scale_factor   # 1e5 presumably converts km/s to cm/s
-            for ax in 'xyz':
-                self.units['particle_velocity_%s' % ax] = cf
-            self.units['particle_mass'] = 1.989e33  ### TODO: Make a global solar mass def
-
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units. Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2castroFieldsDict:
-            self.conversion_factors[key] = 1.0
-        for key in castro_particle_field_names:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0

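The read_global_header walk above mirrors the fixed layout of a BoxLib
plotfile Header.  A minimal sketch of how the first few lines are consumed,
using illustrative values rather than a real plotfile:

    # Toy Header contents, in the order read_global_header expects:
    # version, n_fields, the field names, dimensionality, time, finest
    # level, then the domain edges (spacings and grid boxes follow).
    lines = ["HyperCLaw-V1.1", "2", "density", "xmom",
             "3", "0.123", "1", "0.0 0.0 0.0", "1.0 1.0 1.0"]

    n_fields = int(lines[1])
    field_list = [l.rstrip() for l in lines[2:2 + n_fields]]
    counter = n_fields + 2
    dimension = int(lines[counter]); counter += 1   # must be 3 here
    time = float(lines[counter]); counter += 1
    finest_level = int(lines[counter]); counter += 1
    left_edge = [float(v) for v in lines[counter].split()]; counter += 1
    right_edge = [float(v) for v in lines[counter].split()]
    # field_list == ['density', 'xmom'], dimension == 3, time == 0.123
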
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/definitions.py
--- a/yt/frontends/castro/definitions.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.funcs import *
-
-def boxlib_bool_to_int(v):
-    try:
-        return int(v)
-    except ValueError:
-        pass
-    v = v.upper().strip()
-    if v[0] == 'T':
-        return 1
-    elif v[0] == 'F':
-        return 0
-    raise ValueError("Cannot interpret '%s' as a BoxLib boolean." % v)
-
-# TODO: get rid of enzo parameters we do not need
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": boxlib_bool_to_int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                }
-
-# converts the Castro inputs file name to the Enzo/yt name expected
-# throughout the code. key is Castro name, value is Enzo/yt equivalent
-castro2enzoDict = {"amr.n_cell": "TopGridDimensions",
-                  "materials.gamma": "Gamma",
-                  "amr.ref_ratio": "RefineBy",
-                  "castro.use_comoving": "ComovingCoordinates",
-                  "castro.redshift_in": "CosmologyInitialRedshift",
-                  "comoving_OmL": "CosmologyOmegaLambdaNow",
-                  "comoving_OmM": "CosmologyOmegaMatterNow",
-                  "comoving_h": "CosmologyHubbleConstantNow"
-                  }
-
-yt2castroFieldsDict = {}
-castro2ytFieldsDict = {}
-
-castro_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"
-
-castro_particle_field_names = \
-    ['particle_position_%s' % ax for ax in 'xyz'] + \
-    ['particle_mass'] +  \
-    ['particle_velocity_%s' % ax for ax in 'xyz']

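To make the FAB header pattern above concrete, here is a small sketch that
parses an illustrative (not captured-from-disk) header line the same way
_cache_endianness does:

    import re

    pattern = (r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)"
               r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
    header = ("FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (8 7 6 5 4 3 2 1)))"
              "((0,0,0) (63,63,63) (0,0,0)) 27\n")

    m = re.search(pattern, header)
    bytes_per_real, endian, start, stop, centering, n_comp = m.groups()
    bytes_per_real = int(bytes_per_real)
    # Same test as the reader: the byte-order list counts down from
    # bytes_per_real for little endian, up to it for big endian.
    if bytes_per_real == int(endian[0]):
        dtype = '<'
    elif bytes_per_real == int(endian[-1]):
        dtype = '>'
    dtype += 'f%i' % bytes_per_real                   # '<f8'
    dims = [int(b) - int(a) + 1 for a, b in
            zip(start.split(','), stop.split(','))]   # [64, 64, 64]
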
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/fields.py
--- a/yt/frontends/castro/fields.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""
-Castro-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    TranslationFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import mh, kboltz
-
-translation_dict = {
-    "x-velocity": "xvel",
-    "y-velocity": "yvel",
-    "z-velocity": "zvel",
-    "Density": "density",
-    "Total_Energy": "eden",
-    "Temperature": "temperature",
-    "x-momentum": "xmom",
-    "y-momentum": "ymom",
-    "z-momentum": "zmom"
-}
-
-# Setup containers for fields possibly in the output files
-KnownCastroFields = FieldInfoContainer()
-add_castro_field = KnownCastroFields.add_field
-
-# and always derived ones
-CastroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CastroFieldInfo.add_field
-
-# Start adding fields
-add_castro_field("density", function=NullFunc, take_log=True,
-                 units=r"\rm{g}/\rm{cm}^3")
-
-# fix projected units
-KnownCastroFields["density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-
-add_castro_field("eden", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("eden")],
-                 units=r"\rm{erg}/\rm{cm}^3")
-
-add_castro_field("xmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("xmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-add_castro_field("ymom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("ymom")],
-                 units=r"\rm{gm}/\rm{cm^2\ s}")
-
-add_castro_field("zmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("zmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-# Now populate derived fields
-for mine, theirs in translation_dict.items():
-    if KnownCastroFields.has_key(theirs):
-        add_field(theirs, function=TranslationFunc(mine),
-                  take_log=KnownCastroFields[theirs].take_log)
-
-# Now fallbacks, in case these fields are not output
-def _xVelocity(field, data):
-    """ Generate x-velocity from x-momentum and density. """
-    return data["xmom"] / data["density"]
-
-add_field("x-velocity", function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _yVelocity(field, data):
-    """ Generate y-velocity from y-momentum and density. """
-    return data["ymom"] / data["density"]
-
-add_field("y-velocity", function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field, data):
-    """ Generate z-velocity from z-momentum and density. """
-    return data["zmom"] / data["density"]
-
-add_field("z-velocity", function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _ThermalEnergy(field, data):
-    """
-    Generate thermal energy (gas energy). The Dual Energy Formalism was
-    implemented by Stella, but this isn't how it is invoked here, so it is
-    left commented out for now.
-
-    """
-    #if data.pf["DualEnergyFormalism"]:
-    #    return data["Gas_Energy"]
-    #else:
-    return data["Total_Energy"] - 0.5 * data["density"] * (
-        data["x-velocity"]**2.0
-        + data["y-velocity"]**2.0
-        + data["z-velocity"]**2.0 )
-
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
-
-def _Pressure(field, data):
-    """
-    M{(Gamma-1.0)*e}, where e is thermal energy density
-    
-    NB: this will need to be modified for radiation
-
-    """
-    return (data.pf["Gamma"] - 1.0) * data["ThermalEnergy"]
-
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
-
-def _Temperature(field, data):
-    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
-            data["ThermalEnergy"] / (kboltz * data["Density"]))
-
-add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
-          take_log=False)
-
-def _convertParticleMassMsun(data):
-    return 1.0 / 1.989e33
-def _ParticleMassMsun(field, data):
-    return data["particle_mass"]
-
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_ParticleMassMsun)
-
-# Fundamental fields that are usually/always output:
-#   density
-#   xmom
-#   ymom
-#   zmom
-#   rho_E
-#   rho_e
-#   Temp
-#
-# "Derived" fields that are sometimes output:
-#   x_velocity
-#   y_velocity
-#   z_velocity
-#   magvel
-#   grav_x
-#   grav_y
-#   grav_z
-#   maggrav
-#   magvort
-#   pressure
-#   entropy
-#   divu
-#   eint_e (e as derived from the "rho e" variable)
-#   eint_E (e as derived from the "rho E" variable)

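The fallback derived fields above form a short chain: momentum over density
gives velocity, subtracting the bulk kinetic energy density from
Total_Energy gives thermal energy, and (Gamma - 1)*e gives pressure.  A toy
numpy sketch of that chain, with illustrative values and rounded CGS
constants:

    import numpy as np

    density = np.array([1.0e-24, 2.0e-24])      # g/cm^3
    xmom = np.array([1.0e-18, -4.0e-19])        # g/(cm^2 s)
    eden = np.array([1.0e-12, 3.0e-12])         # erg/cm^3 ("Total_Energy")
    gamma, mu = 5.0 / 3.0, 0.6                  # mu as read from "probin"
    mh, kboltz = 1.67e-24, 1.38e-16             # rounded CGS constants

    xvel = xmom / density                       # _xVelocity
    thermal = eden - 0.5 * density * xvel**2    # _ThermalEnergy (x-motion only)
    pressure = (gamma - 1.0) * thermal          # _Pressure
    temperature = (gamma - 1.0) * mu * mh * thermal / (kboltz * density)
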
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Castro data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import re
-import numpy as np
-from yt.funcs import mylog
-from yt.utilities.io_handler import \
-           BaseIOHandler
-from yt.utilities.lib import \
-            read_castro_particles
-
-from definitions import \
-    yt2castroFieldsDict, \
-    castro_particle_field_names, \
-    castro_FAB_header_pattern
-
-class IOHandlerNative(BaseIOHandler):
-
-    _data_style = "castro_native"
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
-    def _read_data(self, grid, field):
-        """
-        Reads packed multiFABs output by BoxLib in "NATIVE" format.
-
-        """
-        if field in castro_particle_field_names:
-            return self._read_particle_field(grid, field)
-        filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen,'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            headerRe = re.compile(castro_FAB_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian
-            # check endianness (this code is ugly. fix?)
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i'% bytesPerReal) #always a floating point
-
-            # determine size of FAB
-            start = np.array(map(int, start.split(',')))
-            stop = np.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytes_per_real:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-
-        else:
-            start = grid.start_index
-            stop = grid.stop_index
-            dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytes_per_real
-
-        nElements = grid.ActiveDimensions.prod()
-
-        # one field has nElements*bytesPerReal bytes and is located
-        # nElements*bytesPerReal*field_index from the offset location
-        if yt2castroFieldsDict.has_key(field):
-            fieldname = yt2castroFieldsDict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = np.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
-
-        # we can/should also check against the max and min in the header file
-
-        inFile.close()
-        return field

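A condensed sketch of the seek arithmetic _read_data performs in the
non-paranoid path, for a hypothetical grid whose shape, FAB offset, and
field index are already known from the hierarchy:

    import numpy as np

    dims = np.array([16, 16, 16])       # grid.ActiveDimensions
    dtype = '<f8'                       # cached by _cache_endianness
    bytes_per_real = 8
    field_index = 2                     # this field's slot within the FAB

    def read_component(path, offset):
        f = open(path, 'rb')
        f.seek(offset)                  # start of this grid's FAB
        f.readline()                    # skip the one-line FAB header
        n = int(dims.prod())
        # each component lies n*bytes_per_real bytes further along
        f.seek(n * bytes_per_real * field_index, 1)
        data = np.fromfile(f, count=n, dtype=dtype)
        f.close()
        # FABs are Fortran-ordered; mirror the reader's reshape/swapaxes
        return data.reshape(dims[::-1]).swapaxes(0, 2)
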
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/castro/setup.py
--- a/yt/frontends/castro/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('castro', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

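For context, configuration() above is the standard numpy.distutils hook; a
package-level setup script consumes it roughly like this:

    if __name__ == '__main__':
        from numpy.distutils.core import setup
        setup(configuration=configuration)
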
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/__init__.py
--- a/yt/frontends/maestro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.maestro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/api.py
--- a/yt/frontends/maestro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.maestro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      MaestroGrid, \
-      MaestroHierarchy, \
-      MaestroStaticOutput
-
-from .fields import \
-      MaestroFieldInfo, \
-      add_maestro_field
-
-from .io import \
-      IOHandlerNative

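For reference, these re-exports were what user scripts imported; for
example, with a hypothetical plotfile directory name:

    from yt.frontends.maestro.api import MaestroStaticOutput

    pf = MaestroStaticOutput("plt00000")    # hypothetical plotfile directory
    print(pf.h.num_grids)                   # accessing .h parses the Header
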
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/data_structures.py
--- a/yt/frontends/maestro/data_structures.py
+++ /dev/null
@@ -1,583 +0,0 @@
-"""
-Data structures for Maestro - borrows heavily from the Orion frontend.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import re
-import os
-import weakref
-import numpy as np
-
-from collections import \
-    defaultdict
-from string import \
-    strip, \
-    rstrip
-from stat import \
-    ST_CTIME
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-           AMRGridPatch
-from yt.geometry.grid_geometry_handler import \
-           GridGeometryHandler
-from yt.data_objects.static_output import \
-           StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-
-from .definitions import \
-    maestro2enzoDict, \
-    parameterTypes, \
-    yt2maestroFieldsDict, \
-    maestro_FAB_header_pattern
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-from .fields import \
-    MaestroFieldInfo, \
-    add_maestro_field, \
-    KnownMaestroFields
-
-
-class MaestroGrid(AMRGridPatch):
-    _id_offset = 0
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions,start,stop,paranoia=False,**kwargs):
-        AMRGridPatch.__init__(self, index,**kwargs)
-        self.filename = filename
-        self._offset = offset
-        self._paranoid = paranoia
-        
-        # should error check this
-        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
-        self.LeftEdge  = LeftEdge.copy()
-        self.RightEdge = RightEdge.copy()
-        self.index = index
-        self.Level = level
-
-    def get_global_startindex(self):
-        return self.start_index
-
-    def _prepare_grid(self):
-        """
-        Copies all the appropriate attributes from the hierarchy
-        """
-        # This is definitely the slowest part of generating the hierarchy
-        # Now we give it pointers to all of its attributes
-        # Note that to keep in line with Enzo, we have broken PEP-8
-        h = self.hierarchy # cache it
-        h.grid_levels[self.id,0] = self.Level
-        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
-        h.grid_right_edge[self.id,:] = self.RightEdge[:]
-        self.field_indexes = h.field_indexes
-        self.Children = h.gridTree[self.id]
-        pIDs = h.gridReverseTree[self.id]
-        if len(pIDs) > 0:
-            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
-        else:
-            self.Parent = None
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        dx = self.hierarchy.grid_dxs[self.index][0]
-        dy = self.hierarchy.grid_dys[self.index][0]
-        dz = self.hierarchy.grid_dzs[self.index][0]
-        self.dds = np.array([dx, dy, dz])
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    def __repr__(self):
-        return "MaestroGrid_%04i" % (self.id)
-
-class MaestroHierarchy(GridGeometryHandler):
-    grid = MaestroGrid
-    def __init__(self, pf, data_style='maestro'):
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        header_filename = os.path.join(pf.fullplotdir,'Header')
-        self.directory = pf.fullpath
-        self.data_style = data_style
-
-        # this also sets up the grid objects
-        self.readGlobalHeader(header_filename, 
-                              self.parameter_file.paranoid_read) 
-
-        pf.current_time = self.Time
-        
-        self.__cache_endianness(self.levels[-1].grids[-1])
-        GridGeometryHandler.__init__(self,pf, self.data_style)
-        self._setup_data_io()
-        self._setup_field_list()
-        self._populate_hierarchy()
-        
-    def readGlobalHeader(self,filename,paranoid_read):
-        """
-        Read the global header file for a Maestro plotfile output.
-        """
-        counter = 0
-        header_file = open(filename,'r')
-        self.__global_header_lines = header_file.readlines()
-
-        # parse the file
-        self.maestro_version = self.__global_header_lines[0].rstrip()
-        self.n_fields      = int(self.__global_header_lines[1])
-
-        counter = self.n_fields+2
-        self.field_list = []
-        for i,line in enumerate(self.__global_header_lines[2:counter]):
-            self.field_list.append(line.rstrip())
-
-        self.dimension = int(self.__global_header_lines[counter])
-        if self.dimension != 3:
-            raise RunTimeError("Maestro must be in 3D to use yt.")
-        counter += 1
-        self.Time = float(self.__global_header_lines[counter])
-        counter += 1
-        self.finest_grid_level = int(self.__global_header_lines[counter])
-        self.n_levels = self.finest_grid_level + 1
-        counter += 1
-        # quantities with _unnecessary are also stored in the inputs
-        # file and are not needed.  they are read in and stored in
-        # case in the future we want to enable a "backwards" way of
-        # taking the data out of the Header file and using it to fill
-        # in for a missing inputs file
-        self.domainLeftEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
-        counter += 1
-        self.domainRightEdge_unnecessary = np.array(map(float,self.__global_header_lines[counter].split()))
-        counter += 1
-        self.refinementFactor_unnecessary = self.__global_header_lines[counter].split()
-        counter += 1
-        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
-
-        counter += 1 # unused line in Maestro BoxLib
-        
-        counter += 1
-        self.dx = np.zeros((self.n_levels,3))
-        for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = np.array(map(float,line.split()))
-
-        counter += self.n_levels # unused line in Maestro BoxLib
-        
-        counter += 1 # unused line in Maestro BoxLib
-        
-        counter += 1
-        # each level is one group with ngrids on it. each grid has 3 lines of 
-        # 2 reals
-        self.levels = []
-        grid_counter = 0
-        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{5}) (\d+)\n"
-        re_file_finder = re.compile(file_finder_pattern)
-        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
-        re_dim_finder = re.compile(dim_finder_pattern)
-        data_files_pattern = r"Level_[\d]+/"
-        data_files_finder = re.compile(data_files_pattern)
-
-        for level in range(0,self.n_levels):
-            tmp = self.__global_header_lines[counter].split()
-            lev,ngrids,unused = int(tmp[0]),int(tmp[1]),float(tmp[2])
-
-            counter += 1 # unused line in Maestro BoxLib
-
-            counter += 1
-            self.levels.append(MaestroLevel(lev,ngrids))
-            # open level header, extract file names and offsets for
-            # each grid
-            # read slightly out of order here: at the end of the lo,hi
-            # pairs for x,y,z is a *list* of file types in the Level
-            # directory. Each type has a Header and a number of data
-            # files (one per processor)
-            tmp_offset = counter + 3*ngrids
-            nfiles = 0
-            key_off = 0
-            files =   {}
-            offsets = {}
-            while (nfiles+tmp_offset < len(self.__global_header_lines) and 
-                   data_files_finder.match(
-                    self.__global_header_lines[nfiles+tmp_offset])):
-                filen = os.path.join(self.parameter_file.fullplotdir, 
-                                     self.__global_header_lines[nfiles+tmp_offset].strip())
-                # open each "_H" header file, and get the number of
-                # components within it
-                level_header_file = open(filen+'_H','r').read()
-                start_stop_index = re_dim_finder.findall(level_header_file)
-                grid_file_offset = re_file_finder.findall(level_header_file)
-                ncomp_this_file = int(level_header_file.split('\n')[2])
-                f,o = zip(*grid_file_offset)
-                for i in range(ncomp_this_file):
-                    key = self.field_list[i+key_off]
-                    files[key] = f
-                    offsets[key] = o
-                    self.field_indexes[key] = i
-                key_off += ncomp_this_file
-                nfiles += 1
-            # convert dict of lists to list of dicts
-            fn = []
-            off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,
-                                     'Level_%02i'%level)
-            for i in range(ngrids):
-                fi = [os.path.join(lead_path,files[key][i]) for key in self.field_list]
-                of = [int(offsets[key][i]) for key in self.field_list]
-                fn.append(dict(zip(self.field_list,fi)))
-                off.append(dict(zip(self.field_list,of)))
-
-            for grid in range(0,ngrids):
-                gfn = fn[grid]  # filename of file containing this grid
-                gfo = off[grid] # offset within that file
-                xlo,xhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                ylo,yhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                zlo,zhi = map(float,self.__global_header_lines[counter].split())
-                counter+=1
-                lo = np.array([xlo,ylo,zlo])
-                hi = np.array([xhi,yhi,zhi])
-                dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read,hierarchy=self))
-                grid_counter += 1 # this is global, and shouldn't be reset
-                                  # for each level
-
-            # already read the filenames above...
-            counter+=nfiles
-            self.num_grids = grid_counter
-            self.float_type = 'float64'
-
-        self.maxLevel = self.n_levels - 1 
-        self.max_level = self.n_levels - 1
-        header_file.close()
-
-    def __cache_endianness(self,test_grid):
-        """
-        Cache the endianness and bytes per real of the grids by using a
-        test grid and assuming that all grids have the same
-        endianness. This is a pretty safe assumption since Maestro uses
-        one file per processor, and if you're running on a cluster
-        with different endian processors, then you're on your own!
-        """
-        # open the test file & grab the header
-        inFile = open(os.path.expanduser(test_grid.filename[self.field_list[0]]),'rb')
-        header = inFile.readline()
-        inFile.close()
-        header.strip()
-        
-        # parse it. the pattern is in definitions.py
-        headerRe = re.compile(maestro_FAB_header_pattern)
-        bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
-        self._bytesPerReal = int(bytesPerReal)
-        if self._bytesPerReal == int(endian[0]):
-            dtype = '<'
-        elif self._bytesPerReal == int(endian[-1]):
-            dtype = '>'
-        else:
-            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-        dtype += ('f%i' % self._bytesPerReal) # always a floating point
-        self._dtype = dtype
-
-    def __calculate_grid_dimensions(self,start_stop):
-        start = np.array(map(int,start_stop[0].split(',')))
-        stop = np.array(map(int,start_stop[1].split(',')))
-        dimension = stop - start + 1
-        return dimension,start,stop
-        
-    def _populate_grid_objects(self):
-        mylog.debug("Creating grid objects")
-        self.grids = np.concatenate([level.grids for level in self.levels])
-        self.grid_levels = np.concatenate([level.ngrids*[level.level] for level in self.levels])
-        self.grid_levels = self.grid_levels.reshape((self.num_grids,1))
-        grid_dcs = np.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
-        left_edges = []
-        right_edges = []
-        dims = []
-        for level in self.levels:
-            left_edges += [g.LeftEdge for g in level.grids]
-            right_edges += [g.RightEdge for g in level.grids]
-            dims += [g.ActiveDimensions for g in level.grids]
-        self.grid_left_edge = np.array(left_edges)
-        self.grid_right_edge = np.array(right_edges)
-        self.grid_dimensions = np.array(dims)
-        self.gridReverseTree = [] * self.num_grids
-        self.gridReverseTree = [ [] for i in range(self.num_grids)]
-        self.gridTree = [ [] for i in range(self.num_grids)]
-        mylog.debug("Done creating grid objects")
-
-    def _populate_hierarchy(self):
-        self.__setup_grid_tree()
-        for i, grid in enumerate(self.grids):
-            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
-            grid._prepare_grid()
-            grid._setup_dx()
-
-    def __setup_grid_tree(self):
-        for i, grid in enumerate(self.grids):
-            children = self._get_grid_children(grid)
-            for child in children:
-                self.gridReverseTree[child.id].append(i)
-                self.gridTree[i].append(weakref.proxy(child))
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        dd["field_indexes"] = self.field_indexes
-        GridGeometryHandler._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return self.grids[mask]
-
-    def _setup_field_list(self):
-        self.derived_field_list = []
-        for field in self.field_info:
-            try:
-                fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
-            except:
-                continue
-            available = np.all([f in self.field_list for f in fd.requested])
-            if available: self.derived_field_list.append(field)
-        for field in self.field_list:
-            if field not in self.derived_field_list:
-                self.derived_field_list.append(field)
-
-    def _count_grids(self):
-        """this is already provided in 
-
-        """
-        pass
-
-    def _initialize_grid_arrays(self):
-        mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
-
-    def _parse_hierarchy(self):
-        pass
-    
-    def _detect_fields(self):
-        pass
-
-    def _setup_derived_fields(self):
-        pass
-
-    def _initialize_state_variables(self):
-        """override to not re-initialize num_grids in GridGeometryHandler.__init__
-
-        """
-        self._parallel_locking = False
-        self._data_file = None
-        self._data_mode = None
-        self._max_locations = {}
-    
-class MaestroLevel:
-    def __init__(self,level,ngrids):
-        self.level = level
-        self.ngrids = ngrids
-        self.grids = []
-    
-
-class MaestroStaticOutput(StaticOutput):
-    """
-    This class is a stripped down class that simply reads and parses
-    *filename*, without looking at the Maestro hierarchy.
-    """
-    _hierarchy_class = MaestroHierarchy
-    _fieldinfo_fallback = MaestroFieldInfo
-    _fieldinfo_known = KnownMaestroFields
-
-    def __init__(self, plotname, paramFilename=None, 
-                 data_style='maestro', paranoia=False,
-                 storage_filename = None):
-        """need to override for Maestro file structure.
-
-        plotname here will be a directory name
-        
-        paramFilename is usually named "job_info" and lives in the plotname
-        directory.  This file contains the "probin" namelist, which contains
-        the simulation parameters.
-
-        """
-        self.storage_filename = storage_filename
-        self.paranoid_read = paranoia
-        self.__ipfn = paramFilename
-
-        StaticOutput.__init__(self, plotname.rstrip("/"),
-                              data_style='maestro')
-
-        # this is the unit of time; NOT the current time
-        self.parameters["Time"] = 1 # second
-
-        self._parse_header_file()
-
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.fullplotdir, default)
-        return f
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        pname = args[0].rstrip("/")
-        return os.path.exists(os.path.join(pname, "job_info"))
-                
-        
-    def _parse_parameter_file(self):
-        """
-        Parses the parameter file and establishes the various
-        dictionaries.
-        """
-        _n_cellx = _n_celly = _n_cellz = 0
-        _prob_hi_x = _prob_hi_y = _prob_hi_z = 0.0
-        _prob_lo_x = _prob_lo_y = _prob_lo_z = 0.0
-
-        local_opts = {"n_cellx": int, 
-                      "n_celly": int,
-                      "n_cellz": int,
-                      "prob_hi_x": float, 
-                      "prob_hi_y": float, 
-                      "prob_hi_z": float,
-                      "prob_lo_x": float, 
-                      "prob_lo_y": float, 
-                      "prob_lo_z": float
-                      }
-
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
-        self.parameter_filename = self._localize(
-                self.__ipfn, 'job_info')
-        # Let's read the file
-        self.unique_identifier = \
-            int(os.stat(self.fullplotdir)[ST_CTIME])
-
-        _parameters = NameList("probin",self.parameter_filename)
-        for param, val in _parameters:
-            if local_opts.has_key(param):
-                exec("_%s = %s" % (param, val))
-            elif maestro2enzoDict.has_key(param):
-                paramName = maestro2enzoDict[param]
-                t = parameterTypes[paramName](val)
-                exec("self.%s = %s" % (paramName,t))
-
-        self.domain_dimensions = np.array([_n_cellx,_n_celly,_n_cellz])
-        self.domain_left_edge = np.array([_prob_lo_x,_prob_lo_y,_prob_lo_z])
-        self.domain_right_edge = np.array([_prob_hi_x,_prob_hi_y,_prob_hi_z])
-        
-        self.cosmological_simulation = self.current_redshift = \
-            self.omega_matter = self.omega_lambda = self.hubble_constant = 0
-
-        # set the current time to zero.  we will update this when we read the
-        # header file
-        self.current_time = 0.0
-
-
-    def _parse_header_file(self):
-        """
-        Parses the BoxLib header file to get any parameters stored
-        there. Hierarchy information is read out of this file in
-        MaestroHierarchy. 
-
-        Currently, only Time is read here.
-        """
-        header_file = open(os.path.join(self.fullplotdir,'Header'))
-        lines = header_file.readlines()
-        header_file.close()
-        n_fields = int(lines[1])
-        self.current_time = float(lines[3+n_fields])
-
-                
-    def _set_units(self):
-        """
-        Generates the conversions to various physical units based on the parameter file.
-        """
-        self.units = {}
-        self.time_units = {}
-        self._setup_nounits_units()
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-        for key in yt2maestroFieldsDict:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units.  Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-
-class NameList(object):
-    """A simple class for storing the data in a FORTRAN namelist as a dict.
-    """
-    
-    def __init__(self, name=None, filename=None):
-        """Initialize the class.
-        
-        Arguments:
-        - `name`: the name of the namelist as seen in FORTRAN
-        - `filename`: the filename which contains namelist name
-        """
-        self._name = name.lower()
-        self._filename = filename
-
-        self._namelist = self._build_namelist()
-
-    def __getitem__(self, key):
-        """
-        Returns the value of variable key from the namelist.
-        """
-        if key in self._namelist: return self._namelist[key]
-        raise KeyError(key)
-
-    def __iter__(self):
-        return self._namelist.iteritems()
-
-    def _build_namelist(self):
-        """
-        Parse self.filename and grab the information from the namelist
-        """
-        lines = open(self._filename).read().lower()
-
-        namelist_name_pattern = "&%s" % self._name
-        namelist_name_finder = re.compile(namelist_name_pattern)
-        namelist_end_pattern = r"/"
-        namelist_end_finder = re.compile(namelist_end_pattern)
-
-        start = namelist_name_finder.search(lines).start()
-        end = namelist_end_finder.search(lines,start).start()
-
-        namelist_data_pattern = r"\s*(\w+ *= *[\w\.\-%\+]+)"
-        namelist_data_finder = re.compile(namelist_data_pattern)
-        
-        key_value_pairs = namelist_data_finder.findall(lines,start,end)
-
-        namelist = {}
-
-        for key_value_pair in key_value_pairs:
-            key, value = map(strip,map(rstrip,key_value_pair.split("=")))
-            namelist[key] = value
-
-        return namelist

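The NameList class removed above parses a FORTRAN namelist into a plain
dict whose values are kept as raw strings.  As a quick sketch of how it
was used (the plotfile path and parameter contents here are illustrative,
not from a real dataset):

    # Suppose plt00000/job_info contains:
    #   &probin
    #     n_cellx = 64
    #     prob_lo_x = 0.0
    #   /
    params = NameList("probin", "plt00000/job_info")
    print params["n_cellx"]      # "64" -- values are not type-converted
    for key, val in params:      # __iter__ yields (key, value) pairs
        print key, val
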
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/definitions.py
--- a/yt/frontends/maestro/definitions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Various definitions for various other modules and routines - modified
-from Orion frontend.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.funcs import *
-
-parameterTypes = {"dimensionality": int,
-                 "refine_by": int
-                 }
-
-# converts the Maestro inputs file name to the Enzo/yt name expected
-# throughout the code. key is Maestro name, value is Enzo/yt equivalent
-maestro2enzoDict = {"dm_in": "dimensionality",
-                    "ref_ratio": "refine_by"
-                  }
-
-yt2maestroFieldsDict = {}
-maestro2ytFieldsDict = {}
-
-maestro_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"

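The maestro_FAB_header_pattern above captures six groups from the header
line that precedes each FAB record: bytes per real, the byte-order
descriptor, the start/stop/centering index triples, and the component
count.  A minimal sketch against a made-up 3D header line (the numbers
are illustrative):

    import re
    header = ("FAB ((8, (64 11 52 0 1 12 0 1023)),"
              "(8, (1 2 3 4 5 6 7 8)))"
              "((0,0,0) (63,63,63) (0,0,0)) 27\n")
    m = re.compile(maestro_FAB_header_pattern).search(header)
    bpr, endian, start, stop, centering, ncomp = m.groups()
    # ('8', '1 2 3 4 5 6 7 8', '0,0,0', '63,63,63', '0,0,0', '27')
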
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/fields.py
--- a/yt/frontends/maestro/fields.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""
-Maestro-specific fields - borrows heavily from Orion frontend.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.utilities.physical_constants import \
-    mh, kboltz
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-
-KnownMaestroFields = FieldInfoContainer()
-add_maestro_field = KnownMaestroFields.add_field
-
-MaestroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = MaestroFieldInfo.add_field
-
-add_field("density", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("density")],
-          units=r"\rm{g}/\rm{cm}^3")
-MaestroFieldInfo["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
-
-translation_dict = {"x-velocity": "x_vel",
-                    "y-velocity": "y_vel",
-                    "z-velocity": "z_vel",
-                    "Density": "density",
-                    "Temperature": "tfromp"
-                   }
-
-def _generate_translation(mine, theirs):
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
-
-for f,v in translation_dict.items():
-    if v not in MaestroFieldInfo:
-        add_field(v, function=lambda a,b: None, take_log=False,
-                  validators = [ValidateDataField(v)])
-#    print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
-

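The _generate_translation helper above exists for a standard Python
reason: a lambda defined directly inside the loop would close over the
loop variable and see only its final value.  A self-contained sketch of
the pitfall and the fix (the field names are illustrative):

    fields = {}
    aliases = {"Density": "density", "Temperature": "tfromp"}
    for yt_name, code_name in aliases.items():
        # Wrong: fields[yt_name] = lambda d: d[code_name] would look up
        # whichever name the loop assigned last.  A helper binds it now:
        def _translator(src):
            return lambda data: data[src]
        fields[yt_name] = _translator(code_name)
    print fields["Density"]({"density": 1.0, "tfromp": 2.0})   # 1.0
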
diff -r 0975eb84d3dd454e0c107c6fa95781df5f9ac250 -r dce03e91a6a0d275bd3621fada080747fac510a5 yt/frontends/maestro/io.py
--- a/yt/frontends/maestro/io.py
+++ /dev/null
@@ -1,110 +0,0 @@
-"""
-Maestro data-file handling functions - identical to Orion frontend,
-but with all instances of "orion" changed to "maestro".
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-from yt.utilities.io_handler import \
-           BaseIOHandler
-
-from definitions import \
-    yt2maestroFieldsDict
-
-class IOHandlerNative(BaseIOHandler):
-
-    _data_style = "maestro"
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
-    def _read_data(self,grid,field):
-        """
-        reads packed multiFABs output by BoxLib in "NATIVE" format.
-
-        """
-        filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen,'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        if grid._paranoid:
-            mylog.warn("Maestro Native reader: Paranoid read mode.")
-            headerRe = re.compile(maestro_FAB_header_pattern)
-            bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian
-            # check endianness (this code is ugly. fix?)
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i'% bytesPerReal) #always a floating point
-
-            # determine size of FAB
-            start = np.array(map(int,start.split(',')))
-            stop = np.array(map(int,stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytesPerReal:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytesPerReal and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RuntimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-
-        else:
-            start = grid.start_index
-            stop = grid.stop_index
-            dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytesPerReal
-
-        nElements = grid.ActiveDimensions.prod()
-
-        # one field has nElements*bytesPerReal bytes and is located
-        # nElements*bytesPerReal*field_index from the offset location
-        if yt2maestroFieldsDict.has_key(field):
-            fieldname = yt2maestroFieldsDict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = np.fromfile(inFile,count=nElements,dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
-
-        # we can/should also check against the max and min in the header file
-
-        inFile.close()
-        return field
-

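The seek arithmetic in _read_data above deserves spelling out: within a
FAB record each component is stored contiguously, so after seeking to the
grid's data offset the reader skips nElements*bytesPerReal*field_index
bytes to land on the requested field.  With illustrative numbers:

    nElements = 16 * 16 * 16    # a 16^3 grid -> 4096 cells
    bytesPerReal = 8            # float64 data
    field_index = 2             # third component in the FAB
    skip = nElements * bytesPerReal * field_index
    # skip == 65536 bytes past the start of the grid's data
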
This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt-3.0/commits/d506268450c5/
Changeset:   d506268450c5
Branch:      yt-3.0
User:        MatthewTurk
Date:        2013-10-09 22:00:57
Summary:     Merged in MatthewTurk/yt-3.0 (pull request #104)

Refactoring Boxlib and porting to yt-3.0
Affected #:  48 files

diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -398,7 +398,8 @@
             center, pf, field_parameters)
         self.left_edge = np.array(left_edge)
         self.level = level
-        rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+
+        rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
         rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1   # issue 602
         self.dds = self.pf.domain_width / rdx.astype("float64")
         self.ActiveDimensions = np.array(dims, dtype='int32')
@@ -488,9 +489,11 @@
         output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
                          for field in fields]
         domain_dims = self.pf.domain_dimensions.astype("int64") \
-                    * self.pf.refine_by**self.level
+                    * self.pf.relative_refinement(0, self.level)
         for chunk in self._data_source.chunks(fields, "io"):
             input_fields = [chunk[field] for field in fields]
+            # NOTE: This usage of "refine_by" is actually *okay*, because it's
+            # being used with respect to iref, which is *already* scaled!
             fill_region(input_fields, output_fields, self.level,
                         self.global_startindex, chunk.icoords, chunk.ires,
                         domain_dims, self.pf.refine_by)
@@ -647,10 +650,12 @@
         ls = self._initialize_level_state(fields)
         for level in range(self.level + 1):
             domain_dims = self.pf.domain_dimensions.astype("int64") \
-                        * self.pf.refine_by**level
+                        * self.pf.relative_refinement(0, level)
             for chunk in ls.data_source.chunks(fields, "io"):
                 chunk[fields[0]]
                 input_fields = [chunk[field] for field in fields]
+                # NOTE: This usage of "refine_by" is actually *okay*, because it's
+                # being used with respect to iref, which is *already* scaled!
                 fill_region(input_fields, ls.fields, ls.current_level,
                             ls.global_startindex, chunk.icoords,
                             chunk.ires, domain_dims, self.pf.refine_by)
@@ -682,14 +687,16 @@
     def _update_level_state(self, level_state):
         ls = level_state
         if ls.current_level >= self.level: return
+        rf = float(self.pf.relative_refinement(
+                    ls.current_level, ls.current_level + 1))
         ls.current_level += 1
-        ls.current_dx = self._base_dx / self.pf.refine_by**ls.current_level
+        ls.current_dx = self._base_dx / \
+            self.pf.relative_refinement(0, ls.current_level)
         self._setup_data_source(ls)
         LL = self.left_edge - self.pf.domain_left_edge
         ls.old_global_startindex = ls.global_startindex
         ls.global_startindex = np.rint(LL / ls.current_dx).astype('int64') - 1
         ls.domain_iwidth = np.rint(self.pf.domain_width/ls.current_dx).astype('int64') 
-        rf = float(self.pf.refine_by)
         input_left = (level_state.old_global_startindex + 0.5) * rf 
         width = (self.ActiveDimensions*self.dds)
         output_dims = np.rint(width/level_state.current_dx+0.5).astype("int32") + 2

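The covering-grid hunks above replace bare powers of refine_by with
pf.relative_refinement(0, level), so frontends whose refinement factor
varies by level (such as the new Boxlib frontend) report the correct
effective factor.  Under the default implementation the two are
numerically identical; a sketch with assumed values:

    import numpy as np
    domain_dimensions = np.array([32, 32, 32])
    domain_width = np.array([1.0, 1.0, 1.0])
    refine_by, level = 2, 2
    rdx = domain_dimensions * refine_by**level   # == dims * relative_refinement(0, 2)
    dds = domain_width / rdx.astype("float64")
    # dds == [1/128., 1/128., 1/128.], the cell width at level 2
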
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -56,6 +56,7 @@
         self.pf = self.hierarchy.parameter_file  # weakref already
         self._child_mask = self._child_indices = self._child_index_mask = None
         self.start_index = None
+        self.filename = filename
         self._last_mask = None
         self._last_count = -1
         self._last_selector_id = None

diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -321,6 +321,9 @@
             raise RuntimeError
         return int(o2)
 
+    def relative_refinement(self, l0, l1):
+        return self.refine_by**(l1-l0)
+
 def _reconstruct_pf(*args, **kwargs):
     pfs = ParameterFileStore()
     pf = pfs.get_pf_hash(*args)

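The relative_refinement hook added above defaults to a pure power of
refine_by; subclasses (the Boxlib frontend below, via its level_offsets)
can override it.  A minimal stand-in showing the default behavior:

    class _PFSketch(object):
        # hypothetical stand-in for StaticOutput with refine_by = 2
        refine_by = 2
        def relative_refinement(self, l0, l1):
            return self.refine_by ** (l1 - l0)

    pf = _PFSketch()
    print pf.relative_refinement(0, 3)   # 2**3 == 8
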
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/__init__.py
--- /dev/null
+++ b/yt/frontends/boxlib/__init__.py
@@ -0,0 +1,14 @@
+"""
+API for yt.frontends.orion
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------

diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/api.py
--- /dev/null
+++ b/yt/frontends/boxlib/api.py
@@ -0,0 +1,31 @@
+"""
+API for yt.frontends.orion
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .data_structures import \
+      BoxlibGrid, \
+      BoxlibHierarchy, \
+      BoxlibStaticOutput, \
+      OrionHierarchy, \
+      OrionStaticOutput, \
+      CastroStaticOutput, \
+      MaestroStaticOutput
+
+from .fields import \
+      OrionFieldInfo, \
+      KnownOrionFields, \
+      add_orion_field
+
+from .io import \
+      IOHandlerBoxlib

diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/data_structures.py
--- /dev/null
+++ b/yt/frontends/boxlib/data_structures.py
@@ -0,0 +1,838 @@
+"""
+Data structures for Boxlib Codes 
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import re
+import weakref
+import itertools
+
+from collections import defaultdict
+from string import strip, rstrip
+from stat import ST_CTIME
+
+import numpy as np
+
+from yt.funcs import *
+from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
+from yt.data_objects.grid_patch import AMRGridPatch
+from yt.geometry.grid_geometry_handler import GridGeometryHandler
+from yt.data_objects.static_output import StaticOutput
+from yt.utilities.definitions import \
+    mpc_conversion, sec_conversion
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+    parallel_root_only
+from yt.utilities.lib import \
+    get_box_grids_level
+from yt.geometry.selection_routines import \
+    RegionSelector
+from yt.utilities.io_handler import \
+    io_registry
+
+from .definitions import \
+    orion2enzoDict, \
+    parameterDict
+from .fields import \
+    OrionFieldInfo, \
+    add_orion_field, \
+    KnownOrionFields
+from .io import IOHandlerBoxlib
+# This is what we use to find scientific notation that might include d's
+# instead of e's.
+_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
+# This matches the per-grid dimensions in the Cell_H file for each level
+_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# This is the line that prefixes each set of data for a FAB in the FAB file
+_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
+                             r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
+                             "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
+
+
+
+class BoxlibGrid(AMRGridPatch):
+    _id_offset = 0
+    _offset = -1
+
+    def __init__(self, grid_id, offset, filename = None,
+                 hierarchy = None):
+        super(BoxlibGrid, self).__init__(grid_id, filename, hierarchy)
+        self._base_offset = offset
+        self._parent_id = []
+        self._children_ids = []
+
+    def _prepare_grid(self):
+        super(BoxlibGrid, self)._prepare_grid()
+        my_ind = self.id - self._id_offset
+        self.start_index = self.hierarchy.grid_start_index[my_ind]
+
+    def get_global_startindex(self):
+        return self.start_index
+
+    def _setup_dx(self):
+        # has already been read in and stored in hierarchy
+        my_ind = self.id - self._id_offset
+        self.dds = self.hierarchy.level_dds[self.Level,:]
+        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
+
+    def __repr__(self):
+        return "BoxlibGrid_%04i" % (self.id)
+
+    @property
+    def Parent(self):
+        if len(self._parent_id) == 0:
+            return None
+        return [self.hierarchy.grids[pid - self._id_offset]
+                for pid in self._parent_id]
+
+    @property
+    def Children(self):
+        return [self.hierarchy.grids[cid - self._id_offset]
+                for cid in self._children_ids]
+
+    def _seek(self, f):
+        # This will either seek to the _offset or figure out the correct
+        # _offset.
+        if self._offset == -1:
+            f.seek(self._base_offset, os.SEEK_SET)
+            f.readline()
+            self._offset = f.tell()
+        else:
+            f.seek(self._offset)
+
+    # We override here because we can have varying refinement levels
+    def select_ires(self, dobj):
+        mask = self._get_selector_mask(dobj.selector)
+        if mask is None: return np.empty(0, dtype='int64')
+        coords = np.empty(self._last_count, dtype='int64')
+        coords[:] = self.Level + self.pf.level_offsets[self.Level]
+        return coords
+
+    # Override this as well, since refine_by can vary
+    def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
+        rf = self.pf.ref_factors[self.Level]
+        if dlevel != 1:
+            raise NotImplementedError
+        gi, cgi = self.get_global_startindex(), child.get_global_startindex()
+        startIndex = np.maximum(0, cgi / rf - gi)
+        endIndex = np.minimum((cgi + child.ActiveDimensions) / rf - gi,
+                              self.ActiveDimensions)
+        endIndex += (startIndex == endIndex)
+        mask[startIndex[0]:endIndex[0],
+             startIndex[1]:endIndex[1],
+             startIndex[2]:endIndex[2]] = tofill
+
+class BoxlibHierarchy(GridGeometryHandler):
+    grid = BoxlibGrid
+    def __init__(self, pf, data_style='boxlib_native'):
+        self.data_style = data_style
+        self.header_filename = os.path.join(pf.output_dir, 'Header')
+        self.directory = pf.output_dir
+
+        GridGeometryHandler.__init__(self, pf, data_style)
+        self._cache_endianness(self.grids[-1])
+        #self._read_particles()
+
+    def _parse_hierarchy(self):
+        """
+        Read the global header file for a Boxlib plotfile output.
+        """
+        self.max_level = self.parameter_file._max_level
+        header_file = open(self.header_filename,'r')
+        # We can now skip to the point in the file we want to start parsing at.
+        header_file.seek(self.parameter_file._header_mesh_start)
+
+        dx = []
+        for i in range(self.max_level + 1):
+            dx.append([float(v) for v in header_file.next().split()])
+        self.level_dds = np.array(dx, dtype="float64")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("yt only supports cartesian coordinates.")
+        if int(header_file.next()) != 0:
+            raise RunTimeError("INTERNAL ERROR! This should be a zero.")
+
+        # Each level is one group with ngrids in it; each grid has 3 lines of 2 reals.
+        self.grids = []
+        grid_counter = 0
+        for level in range(self.max_level + 1):
+            vals = header_file.next().split()
+            # should this be grid_time or level_time??
+            lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+            assert(lev == level)
+            nsteps = int(header_file.next())
+            for gi in range(ngrids):
+                xlo, xhi = [float(v) for v in header_file.next().split()]
+                ylo, yhi = [float(v) for v in header_file.next().split()]
+                zlo, zhi = [float(v) for v in header_file.next().split()]
+                self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
+                self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
+            # Now we get to the level header filename, which we open and parse.
+            fn = os.path.join(self.parameter_file.output_dir,
+                              header_file.next().strip())
+            level_header_file = open(fn + "_H")
+            level_dir = os.path.dirname(fn)
+            # We skip the first two lines, although they probably contain
+            # useful information I don't know how to decipher.
+            level_header_file.next()
+            level_header_file.next()
+            # Now we get the number of data files
+            ncomp_this_file = int(level_header_file.next())
+            # Skip the next line, and we should get the number of grids / FABs
+            # in this file
+            level_header_file.next()
+            # To decipher this next line, we expect something like:
+            # (8 0
+            # where the first is the number of FABs in this level.
+            ngrids = int(level_header_file.next().split()[0][1:])
+            # Now we can iterate over each and get the indices.
+            for gi in range(ngrids):
+                # Parse the start/stop indices for this grid.
+                start, stop = _dim_finder.match(level_header_file.next()).groups()
+                start = np.array(start.split(","), dtype="int64")
+                stop = np.array(stop.split(","), dtype="int64")
+                dims = stop - start + 1
+                self.grid_dimensions[grid_counter + gi,:] = dims
+                self.grid_start_index[grid_counter + gi,:] = start
+            # Now we read two more lines.  The first of these is a close
+            # parenthesis.
+            level_header_file.next()
+            # This line I'm not 100% sure of, but it's either number of grids
+            # or number of FABfiles.
+            level_header_file.next()
+            # Now we iterate over grids to find their offsets in each file.
+            for gi in range(ngrids):
+                # Now we get the data file, at which point we're ready to
+                # create the grid.
+                dummy, filename, offset = level_header_file.next().split()
+                filename = os.path.join(level_dir, filename)
+                go = self.grid(grid_counter + gi, int(offset), filename, self)
+                go.Level = self.grid_levels[grid_counter + gi,:] = level
+                self.grids.append(go)
+            grid_counter += ngrids
+            # already read the filenames above...
+        self.float_type = 'float64'
+
+    def _cache_endianness(self,test_grid):
+        """
+        Cache the endianness and bytes per real of the grids by using a
+        test grid and assuming that all grids have the same
+        endianness. This is a pretty safe assumption since Boxlib uses
+        one file per processor, and if you're running on a cluster
+        with different endian processors, then you're on your own!
+        """
+        # open the test file & grab the header
+        with open(os.path.expanduser(test_grid.filename), 'rb') as f:
+            header = f.readline()
+        
+        bpr, endian, start, stop, centering, nc = \
+            _header_pattern.search(header).groups()
+        # Note that previously we were using a different value for BPR than we
+        # use now.  Here is an example set of information directly from BoxLib:
+        #  * DOUBLE data
+        #  * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27
+        #  * FLOAT data
+        #  * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27
+        if bpr == endian[0]:
+            dtype = '<f%s' % bpr
+        elif bpr == endian[-1]:
+            dtype = '>f%s' % bpr
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        mylog.debug("FAB header suggests dtype of %s", dtype)
+        self._dtype = np.dtype(dtype)
+
+    def _populate_grid_objects(self):
+        mylog.debug("Creating grid objects")
+        self.grids = np.array(self.grids, dtype='object')
+        self._reconstruct_parent_child()
+        for i, grid in enumerate(self.grids):
+            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            grid._prepare_grid()
+            grid._setup_dx()
+        mylog.debug("Done creating grid objects")
+
+    def _reconstruct_parent_child(self):
+        mask = np.empty(len(self.grids), dtype='int32')
+        mylog.debug("First pass; identifying child grids")
+        for i, grid in enumerate(self.grids):
+            get_box_grids_level(self.grid_left_edge[i,:],
+                                self.grid_right_edge[i,:],
+                                self.grid_levels[i] + 1,
+                                self.grid_left_edge, self.grid_right_edge,
+                                self.grid_levels, mask)
+            ids = np.where(mask.astype("bool")) # where is a tuple
+            grid._children_ids = ids[0] + grid._id_offset 
+        mylog.debug("Second pass; identifying parents")
+        for i, grid in enumerate(self.grids): # Second pass
+            for child in grid.Children:
+                child._parent_id.append(i + grid._id_offset)
+
+    def _count_grids(self):
+        # We can get everything from the Header file, but note that we're
+        # duplicating some work done elsewhere.  In a future where we don't
+        # pre-allocate grid arrays, this becomes unnecessary.
+        header_file = open(self.header_filename, 'r')
+        header_file.seek(self.parameter_file._header_mesh_start)
+        # Skip over the level dxs, geometry and the zero:
+        [header_file.next() for i in range(self.parameter_file._max_level + 3)]
+        # Now we need to be very careful, as we've seeked, and now we iterate.
+        # Does this work?  We are going to count the number of places that we
+        # have a three-item line.  The three items would be level, number of
+        # grids, and then grid time.
+        self.num_grids = 0
+        for line in header_file:
+            if len(line.split()) != 3: continue
+            self.num_grids += int(line.split()[1])
+        
+    def _initialize_grid_arrays(self):
+        super(BoxlibHierarchy, self)._initialize_grid_arrays()
+        self.grid_start_index = np.zeros((self.num_grids,3), 'int64')
+
+    def _initialize_state_variables(self):
+        """override to not re-initialize num_grids in AMRHierarchy.__init__
+
+        """
+        self._parallel_locking = False
+        self._data_file = None
+        self._data_mode = None
+        self._max_locations = {}
+
+    def _detect_fields(self):
+        # This is all done in _parse_header_file
+        self.field_list = self.parameter_file._field_list[:]
+        self.field_indexes = dict((f, i)
+                                for i, f in enumerate(self.field_list))
+        # There are times when field_list may change.  We copy it here to
+        # avoid that possibility.
+        self.field_order = [f for f in self.field_list]
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        GridGeometryHandler._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.parameter_file)
+
+class BoxlibStaticOutput(StaticOutput):
+    """
+    This class is a stripped down class that simply reads and parses
+    *filename*, without looking at the Boxlib hierarchy.
+    """
+    _hierarchy_class = BoxlibHierarchy
+    _fieldinfo_fallback = OrionFieldInfo
+    _fieldinfo_known = KnownOrionFields
+    _output_prefix = None
+
+    # THIS SHOULD BE FIXED:
+    periodicity = (True, True, True)
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='boxlib_native',
+                 storage_filename = None):
+        """
+        The cparam file is usually called "inputs", and there may also be a
+        Fortran parameter file, usually called "probin".  output_dir here
+        will be a directory name.  As per BoxLib, data_style will be Native
+        (implemented here), IEEE (not yet implemented) or ASCII (not yet
+        implemented).
+        """
+        self.output_dir = os.path.abspath(os.path.expanduser(output_dir))
+        self.cparam_filename = self._localize_check(cparam_filename)
+        self.fparam_filename = self._localize_check(fparam_filename)
+        self.storage_filename = storage_filename
+
+        StaticOutput.__init__(self, output_dir, data_style)
+
+        # These are still used in a few places.
+        self.parameters["HydroMethod"] = 'boxlib'
+        self.parameters["Time"] = 1. # default unit is 1...
+        self.parameters["EOSType"] = -1 # default
+
+    def _localize_check(self, fn):
+        # If the file exists, use it.  If not, set it to None.
+        root_dir = os.path.dirname(self.output_dir)
+        full_fn = os.path.join(root_dir, fn)
+        if os.path.exists(full_fn):
+            return full_fn
+        return None
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename) and \
+           not os.path.exists(jobinfo_filename):
+            return True # We have no parameters to go off of
+        # If we do have either inputs or jobinfo, we should be deferring to a
+        # different frontend.
+        return False
+
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+        self._parse_header_file()
+        # Let's read the file
+        hfn = os.path.join(self.output_dir, 'Header')
+        self.unique_identifier = int(os.stat(hfn)[ST_CTIME])
+        # the 'inputs' file is now optional
+        self._parse_cparams()
+        self._parse_fparams()
+
+    def _parse_cparams(self):
+        if self.cparam_filename is None:
+            return
+        for line in (line.split("#")[0].strip() for line in
+                     open(self.cparam_filename)):
+            if len(line) == 0: continue
+            param, vals = [s.strip() for s in line.split("=")]
+            if param == "amr.n_cell":
+                vals = self.domain_dimensions = np.array(vals.split(), dtype='int32')
+            elif param == "amr.ref_ratio":
+                vals = self.refine_by = int(vals[0])
+            elif param == "Prob.lo_bc":
+                vals = self.periodicity = ensure_tuple([i == 0 for i in vals.split()])
+            elif param == "castro.use_comoving":
+                vals = self.cosmological_simulation = int(vals)
+            else:
+                # Now we guess some things about the parameter and its type
+                v = vals.split()[0] # Just in case there are multiple; we'll go
+                                    # back afterward to using vals.
+                try:
+                    float(v.upper().replace("D","E"))
+                except ValueError:
+                    pcast = str
+                else:
+                    syms = (".", "D+", "D-", "E+", "E-")
+                    if any(sym in v.upper() for sym in syms for v in vals.split()):
+                        pcast = float
+                    else:
+                        pcast = int
+                vals = [pcast(v) for v in vals.split()]
+                if len(vals) == 1: vals = vals[0]
+            self.parameters[param] = vals
+
+        if getattr(self, "cosmological_simulation", 0) == 1:
+            self.omega_lambda = self.parameters["comoving_OmL"]
+            self.omega_matter = self.parameters["comoving_OmM"]
+            self.hubble_constant = self.parameters["comoving_h"]
+            a_file = open(os.path.join(self.output_dir,'comoving_a'))
+            line = a_file.readline().strip()
+            a_file.close()
+            self.current_redshift = 1/float(line) - 1
+        else:
+            self.current_redshift = self.omega_lambda = self.omega_matter = \
+                self.hubble_constant = self.cosmological_simulation = 0.0
+
+    def _parse_fparams(self):
+        """
+        Parses the fortran parameter file for Orion. Most of this will
+        be useless, but this is where it keeps mu = mass per
+        particle/m_hydrogen.
+        """
+        if self.fparam_filename is None:
+            return
+        for line in (l for l in open(self.fparam_filename) if "=" in l):
+            param, vals = [v.strip() for v in line.split("=")]
+            # Now, there are a couple different types of parameters.
+            # Some will be where you only have floating point values, others
+            # will be where things are specified as string literals.
+            # Unfortunately, we're also using Fortran values, which will have
+            # things like 1.d-2 which is pathologically difficult to parse if
+            # your C library doesn't include 'd' in its locale for strtod.
+            # So we'll try to determine this.
+            vals = vals.split()
+            if any(_scinot_finder.match(v) for v in vals):
+                vals = [float(v.replace("D","e").replace("d","e"))
+                        for v in vals]
+            if len(vals) == 1:
+                vals = vals[0]
+            self.parameters[param] = vals
+
+    def _parse_header_file(self):
+        """
+        We parse the Boxlib header, which we use as our basis.  Anything in the
+        inputs file will override this, but the inputs file is not strictly
+        necessary for orientation of the data in space.
+        """
+
+        # Note: Python uses a read-ahead buffer, so using .next(), which would
+        # be my preferred solution, won't work here.  We have to explicitly
+        # call readline() if we want to end up with an offset at the very end.
+        # Fortunately, elsewhere we don't care about the offset, so we're fine
+        # everywhere else using iteration exclusively.
+        header_file = open(os.path.join(self.output_dir,'Header'))
+        self.orion_version = header_file.readline().rstrip()
+        n_fields = int(header_file.readline())
+
+        self._field_list = [header_file.readline().strip()
+                           for i in range(n_fields)]
+
+        self.dimensionality = int(header_file.readline())
+        if self.dimensionality != 3:
+            raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+        self.current_time = float(header_file.readline())
+        # This is traditionally a hierarchy attribute, so we will set it, but
+        # in a slightly hidden variable.
+        self._max_level = int(header_file.readline()) 
+        self.domain_left_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        self.domain_right_edge = np.array(header_file.readline().split(),
+                                         dtype="float64")
+        ref_factors = np.array([int(i) for i in
+                                header_file.readline().split()])
+        if ref_factors.size == 0:
+            # We use a default of two, as Nyx doesn't always output this value
+            ref_factors = [2] * (self._max_level + 1)
+        # We can't vary refinement factors based on dimension, or whatever
+        # else they might be varied on.  Curiously, I have found some Castro
+        # 3D data with only two refinement factors, which I don't know how
+        # to interpret.
+        self.ref_factors = ref_factors
+        if np.unique(ref_factors).size > 1:
+            # We want everything to be a multiple of this.
+            self.refine_by = min(ref_factors)
+            # Check that they're all multiples of the minimum.
+            if not all(float(rf)/self.refine_by ==
+                   int(float(rf)/self.refine_by) for rf in ref_factors):
+                raise RuntimeError
+            base_log = np.log2(self.refine_by)
+            self.level_offsets = [0] # level 0 has to have 0 offset
+            lo = 0
+            for lm1, rf in enumerate(self.ref_factors):
+                lo += int(np.log2(rf) / base_log) - 1
+                self.level_offsets.append(lo)
+        #assert(np.unique(ref_factors).size == 1)
+        else:
+            self.refine_by = ref_factors[0]
+            self.level_offsets = [0 for l in range(self._max_level + 1)]
+        # Now we read the global index space, to get the domain dimensions.
+        index_space = header_file.readline()
+        # This will be of the form:
+        #  ((0,0,0) (255,255,255) (0,0,0)) ((0,0,0) (511,511,511) (0,0,0))
+        # So note that if we split it all up based on spaces, we should be
+        # fine, as long as we take the first two entries, which correspond to
+        # the root level.  I'm not 100% pleased with this solution.
+        root_space = index_space.replace("(","").replace(")","").split()[:2]
+        start = np.array(root_space[0].split(","), dtype="int64")
+        stop = np.array(root_space[1].split(","), dtype="int64")
+        self.domain_dimensions = stop - start + 1
+        # Skip timesteps per level
+        header_file.readline()
+        self._header_mesh_start = header_file.tell()
+
+    def _set_units(self):
+        """
+        Generates the conversions to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = 1.0 / sec_conversion[unit]
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+            
+    @parallel_root_only
+    def print_key_parameters(self):
+        for a in ["current_time", "domain_dimensions", "domain_left_edge",
+                  "domain_right_edge"]:
+            if not hasattr(self, a):
+                mylog.error("Missing %s in parameter file definition!", a)
+                continue
+            v = getattr(self, a)
+            mylog.info("Parameters: %-25s = %s", a, v)
+
+    def relative_refinement(self, l0, l1):
+        offset = self.level_offsets[l1] - self.level_offsets[l0]
+        return self.refine_by**(l1-l0 + offset)
+
+class OrionHierarchy(BoxlibHierarchy):
+    
+    def __init__(self, pf, data_style='orion_native'):
+        BoxlibHierarchy.__init__(self, pf, data_style)
+        self._read_particles()
+        #self.io = IOHandlerOrion
+
+    def _read_particles(self):
+        """
+        reads in particles and assigns them to grids.  Checks for both
+        StarParticles and SinkParticles files in the output directory
+        and reads whichever are present.  To add a new Orion particle
+        type, simply add its filename to the list below.
+
+        """
+        self.grid_particle_count = np.zeros(len(self.grids))
+
+        for particle_filename in ["StarParticles", "SinkParticles"]:
+            fn = os.path.join(self.pf.output_dir, particle_filename)
+            if os.path.exists(fn): self._read_particle_file(fn)
+
+    def _read_particle_file(self, fn):
+        """actually reads the orion particle data file itself.
+
+        """
+        if not os.path.exists(fn): return
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            self.num_stars = int(lines[0].strip())
+            for line in lines[1:]:
+                particle_position_x = float(line.split(' ')[1])
+                particle_position_y = float(line.split(' ')[2])
+                particle_position_z = float(line.split(' ')[3])
+                coord = [particle_position_x, particle_position_y, particle_position_z]
+                # for each particle, determine which grids contain it
+                # copied from object_finding_mixin.py
+                mask=np.ones(self.num_grids)
+                for i in xrange(len(coord)):
+                    np.choose(np.greater(self.grid_left_edge[:,i],coord[i]), (mask,0), mask)
+                    np.choose(np.greater(self.grid_right_edge[:,i],coord[i]), (0,mask), mask)
+                ind = np.where(mask == 1)
+                selected_grids = self.grids[ind]
+                # in orion, particles always live on the finest level.
+                # so, we want to assign the particle to the finest of
+                # the grids we just found
+                if len(selected_grids) != 0:
+                    grid = sorted(selected_grids, key=lambda grid: grid.Level)[-1]
+                    ind = np.where(self.grids == grid)[0][0]
+                    self.grid_particle_count[ind] += 1
+                    self.grids[ind].NumberOfParticles += 1
+        return True
+                
+class OrionStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = OrionHierarchy
+
+    def __init__(self, output_dir,
+                 cparam_filename = "inputs",
+                 fparam_filename = "probin",
+                 data_style='orion_native',
+                 storage_filename = None):
+
+        BoxlibStaticOutput.__init__(self, output_dir,
+                 cparam_filename, fparam_filename, data_style)
+          
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args                                                                               
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.                                      
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow                                                     
+        inputs_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['cparam_filename'])
+        if not os.path.exists(inputs_filename):
+            return False
+        if os.path.exists(jobinfo_filename):
+            return False
+        # Now we check for all the others                                                             
+        lines = open(inputs_filename).readlines()
+        if any(("castro." in line for line in lines)): return False
+        if any(("nyx." in line for line in lines)): return False
+        if any(("geometry.prob_lo" in line for line in lines)): return True
+        return False
+
+class CastroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+class MaestroStaticOutput(BoxlibStaticOutput):
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        output_dir = args[0]
+        header_filename = os.path.join(output_dir, "Header")
+        jobinfo_filename = os.path.join(output_dir, "job_info")
+        if not os.path.exists(header_filename):
+            # We *know* it's not boxlib if Header doesn't exist.
+            return False
+        args = inspect.getcallargs(cls.__init__, args, kwargs)
+        # This might need to be localized somehow
+        fparam_filename = os.path.join(
+                            os.path.dirname(os.path.abspath(output_dir)),
+                            args['fparam_filename'])
+        if not os.path.exists(jobinfo_filename):
+            return False
+        if not os.path.exists(fparam_filename):
+            return False
+        # Now we check for all the others
+        lines = open(jobinfo_filename).readlines()
+        # Maestro outputs have "Castro" in them
+        if any(line.startswith("Castro   ") for line in lines): return True
+        return False
+
+
+class NyxHierarchy(BoxlibHierarchy):
+
+    def __init__(self, pf, data_style='nyx_native'):
+        super(NyxHierarchy, self).__init__(pf, data_style)
+        self._read_particle_header()
+
+    def _read_particle_header(self):
+        if not self.pf.parameters["particles.write_in_plotfile"]:
+            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
+            return
+        for fn in ['particle_position_%s' % ax for ax in 'xyz'] + \
+                  ['particle_mass'] +  \
+                  ['particle_velocity_%s' % ax for ax in 'xyz']:
+            self.field_list.append(("io", fn))
+        header = open(os.path.join(self.pf.output_dir, "DM", "Header"))
+        version = header.readline()
+        ndim = header.readline()
+        nfields = header.readline()
+        ntotalpart = int(header.readline())
+        dummy = header.readline() # nextid
+        maxlevel = int(header.readline()) # max level
+
+        # Skip over the per-level grid counts; this information is
+        # redundant with the per-grid info read below.
+        for i in range(maxlevel + 1): dummy = header.readline()
+
+        grid_info = np.fromiter((int(i) for line in header.readlines()
+                                 for i in line.split()),
+                                dtype='int64',
+                                count=3*self.num_grids).reshape((self.num_grids, 3))
+        # Attach each grid's particle file, particle count, and offset.
+
+        for g, pg in itertools.izip(self.grids, grid_info):
+            g.particle_filename = os.path.join(self.pf.output_dir, "DM",
+                                               "Level_%s" % (g.Level),
+                                               "DATA_%04i" % pg[0])
+            g.NumberOfParticles = pg[1]
+            g._particle_offset = pg[2]
+
+        self.grid_particle_count[:, 0] = grid_info[:, 1]
+
+class NyxStaticOutput(BoxlibStaticOutput):
+
+    _hierarchy_class = NyxHierarchy
+
+    @classmethod
+    def _is_valid(cls, *args, **kwargs):
+        # fill our args
+        pname = args[0].rstrip("/")
+        dn = os.path.dirname(pname)
+        if len(args) > 1:
+            kwargs['paramFilename'] = args[1]
+
+        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
+
+        # @todo: new Nyx output.
+        # The presence of "nyx." parameters in the inputs file is what
+        # currently flags Nyx data; a job_info file would instead indicate
+        # MAESTRO data.
+        if not os.path.exists(pfname): return False
+        return any(("nyx." in line for line in open(pfname)))
+
+    def _parse_parameter_file(self):
+        super(NyxStaticOutput, self)._parse_parameter_file()
+        # Nyx is always cosmological.
+        self.cosmological_simulation = 1
+        self.omega_lambda = self.parameters["comoving_OmL"]
+        self.omega_matter = self.parameters["comoving_OmM"]
+        self.hubble_constant = self.parameters["comoving_h"]
+
+        # Read in the `comoving_a` file and parse the value. We should fix this
+        # in the new Nyx output format...
+        a_file = open(os.path.join(self.output_dir, "comoving_a"))
+        a_string = a_file.readline().strip()
+        a_file.close()
+
+        # Set the scale factor and redshift
+        self.cosmological_scale_factor = float(a_string)
+        self.parameters["CosmologyCurrentRedshift"] = 1 / float(a_string) - 1
+
+        # alias
+        self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
+        if self.parameters["particles.write_in_plotfile"]:
+            self.particle_types = ("io",)
+            self.particle_types_raw = self.particle_types
+
+    def _set_units(self):
+        super(NyxStaticOutput, self)._set_units()
+        # Masses are always in $ M_{\odot} $
+        self.units["particle_mass"] = 1.989e33
+
+        mylog.warning("Length units: setting 1.0 = 1.0 Mpc.")
+        self.units.update(mpc_conversion)
+        self.units["density"] = self.units["particle_mass"]/(self.units["cm"])**3
+        self.units["particle_mass_density"] = self.units["density"]
+
+        mylog.warning("Time units: setting 1.0 = Mpc/km s ~ 10^12 yr .")
+        self.time_units["s"] = 1.0 / 3.08568025e19
+        self.conversion_factors["Time"] = 1.0 / 3.08568025e19
+
+        # 1e5 converts km/s to cm/s; velocities scale with the scale factor.
+        cf = 1e5 * self.cosmological_scale_factor
+        for ax in "xyz":
+            self.units["particle_velocity_%s" % ax] = cf
+
+        for unit in sec_conversion.keys():
+            self.time_units[unit] = self.time_units["s"] / sec_conversion[unit]
+

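Note: taken together, the four _is_valid methods above form a small decision
table over the files sitting next to a plotfile. Every Boxlib output has a
Header; Castro and MAESTRO both write a job_info whose identifying lines
start with "Castro   " and are told apart by whether an fparam file exists;
Nyx is flagged by "nyx." parameters in the inputs file; Orion is the
remaining case, with geometry.prob_lo in inputs and no job_info. The Nyx
redshift bookkeeping is then just z = 1/a - 1, so a comoving_a file
containing 0.25 gives z = 3. A minimal sketch of that table follows;
guess_boxlib_flavor is a hypothetical helper, not part of this commit or of
yt's actual dispatch:

    import os

    def guess_boxlib_flavor(output_dir, inputs_fn, fparam_fn=None):
        # Every Boxlib plotfile carries a Header file.
        if not os.path.exists(os.path.join(output_dir, "Header")):
            return None
        jobinfo = os.path.join(output_dir, "job_info")
        lines = open(inputs_fn).readlines() if os.path.exists(inputs_fn) else []
        if any("nyx." in l for l in lines):
            return "Nyx"
        if os.path.exists(jobinfo):
            # Castro writes job_info alone; MAESTRO also writes an fparam file.
            has_fparam = fparam_fn is not None and os.path.exists(fparam_fn)
            return "Maestro" if has_fparam else "Castro"
        if any("geometry.prob_lo" in l for l in lines):
            return "Orion"
        return None
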
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/definitions.py
--- /dev/null
+++ b/yt/frontends/boxlib/definitions.py
@@ -0,0 +1,62 @@
+"""
+Various definitions for various other modules and routines
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+from yt.funcs import *
+
+# TODO: get rid of enzo parameters we do not need
+parameterDict = {"CosmologyCurrentRedshift": float,
+                 "CosmologyComovingBoxSize": float,
+                 "CosmologyOmegaMatterNow": float,
+                 "CosmologyOmegaLambdaNow": float,
+                 "CosmologyHubbleConstantNow": float,
+                 "CosmologyInitialRedshift": float,
+                 "DualEnergyFormalismEta1": float,
+                 "DualEnergyFormalismEta2": float,
+                 "MetaDataString": str,
+                 "HydroMethod": int,
+                 "DualEnergyFormalism": int,
+                 "InitialTime": float,
+                 "ComovingCoordinates": int,
+                 "DensityUnits": float,
+                 "LengthUnits": float,
+                 "LengthUnit": float,
+                 "TemperatureUnits": float,
+                 "TimeUnits": float,
+                 "GravitationalConstant": float,
+                 "Gamma": float,
+                 "MultiSpecies": int,
+                 "CompilerPrecision": str,
+                 "CurrentTimeIdentifier": int,
+                 "RefineBy": int,
+                 "BoundaryConditionName": str,
+                 "TopGridRank": int,
+                 "TopGridDimensions": int,
+                 "EOSSoundSpeed": float,
+                 "EOSType": int,
+                 "NumberOfParticleAttributes": int,
+                }
+
+
+# converts the Orion inputs file name to the Enzo/yt name expected
+# throughout the code. key is Orion name, value is Enzo/yt equivalent
+orion2enzoDict = {"amr.n_cell": "TopGridDimensions",
+                  "materials.gamma": "Gamma",
+                  "amr.ref_ratio": "RefineBy",
+                  "castro.use_comoving": "ComovingCoordinates",
+                  "castro.redshift_in": "CosmologyInitialRedshift",
+                  "comoving_OmL": "CosmologyOmegaLambdaNow",
+                  "comoving_OmM": "CosmologyOmegaMatterNow",
+                  "comoving_h": "CosmologyHubbleConstantNow"
+                  }
+

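Note: orion2enzoDict maps inputs-file keys to their Enzo-style names, and
parameterDict then supplies the type the raw string value should be cast to.
A sketch of how the two dicts above might combine for scalar parameters;
translate_param is a hypothetical helper, and vector-valued keys such as
amr.n_cell would need extra handling:

    def translate_param(line):
        key, _, val = [t.strip() for t in line.partition("=")]
        if key not in orion2enzoDict:
            return None
        enzo_name = orion2enzoDict[key]
        cast = parameterDict.get(enzo_name, str)   # default to str
        return enzo_name, cast(val)

    # e.g. translate_param("comoving_h = 0.702")
    #      -> ("CosmologyHubbleConstantNow", 0.702)
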
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/fields.py
--- /dev/null
+++ b/yt/frontends/boxlib/fields.py
@@ -0,0 +1,183 @@
+"""
+Orion-specific fields
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+
+from yt.utilities.physical_constants import \
+    mh, kboltz
+from yt.data_objects.field_info_container import \
+    FieldInfoContainer, \
+    NullFunc, \
+    TranslationFunc, \
+    FieldInfo, \
+    ValidateParameter, \
+    ValidateDataField, \
+    ValidateProperty, \
+    ValidateSpatial, \
+    ValidateGridType
+import yt.data_objects.universal_fields
+
+
+KnownOrionFields = FieldInfoContainer()
+add_orion_field = KnownOrionFields.add_field
+
+OrionFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+add_field = OrionFieldInfo.add_field
+
+add_orion_field("density", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("density")],
+                units=r"\rm{g}/\rm{cm}^3")
+KnownOrionFields["density"]._projected_units =r"\rm{g}/\rm{cm}^2"
+
+add_orion_field("eden", function=NullFunc, take_log=True,
+                validators = [ValidateDataField("eden")],
+                units=r"\rm{erg}/\rm{cm}^3")
+
+add_orion_field("xmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("xmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+add_orion_field("ymom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("ymom")],
+                units=r"\rm{gm}/\rm{cm^2\ s}")
+
+add_orion_field("zmom", function=NullFunc, take_log=False,
+                validators = [ValidateDataField("zmom")],
+                units=r"\rm{g}/\rm{cm^2\ s}")
+
+translation_dict = {"x-velocity": "xvel",
+                    "y-velocity": "yvel",
+                    "z-velocity": "zvel",
+                    "Density": "density",
+                    "TotalEnergy": "eden",
+                    "Temperature": "temperature",
+                    "x-momentum": "xmom",
+                    "y-momentum": "ymom",
+                    "z-momentum": "zmom"
+                   }
+
+for f,v in translation_dict.items():
+    if v not in KnownOrionFields:
+        add_orion_field(v, function=NullFunc, take_log=False,
+                  validators = [ValidateDataField(v)])
+    ff = KnownOrionFields[v]
+    add_field(f, TranslationFunc(v),
+              take_log=KnownOrionFields[v].take_log,
+              units = ff._units, display_name=f)
+
+def _xVelocity(field, data):
+    """generate x-velocity from x-momentum and density
+    
+    """
+    return data["xmom"]/data["density"]
+add_orion_field("x-velocity",function=_xVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _yVelocity(field, data):
+    """generate y-velocity from y-momentum and density
+
+    """
+    return data["ymom"]/data["density"]
+add_orion_field("y-velocity",function=_yVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _zVelocity(field,data):
+    """generate z-velocity from z-momentum and density
+    
+    """
+    return data["zmom"]/data["density"]
+add_orion_field("z-velocity",function=_zVelocity, take_log=False,
+                units=r'\rm{cm}/\rm{s}')
+
+def _ThermalEnergy(field, data):
+    """generate thermal energy (gas energy). The Dual Energy Formalism
+        was implemented by Stella, but it is not invoked that way here,
+        so the branch below is left commented out for now.
+    """
+    #if data.pf["DualEnergyFormalism"]:
+    #    return data["GasEnergy"]
+    #else:
+    return data["TotalEnergy"] - 0.5 * data["density"] * (
+        data["x-velocity"]**2.0
+        + data["y-velocity"]**2.0
+        + data["z-velocity"]**2.0 )
+add_field("ThermalEnergy", function=_ThermalEnergy,
+                units=r"\rm{ergs}/\rm{cm^3}")
+
+def _Pressure(field,data):
+    """M{(Gamma-1.0)*e, where e is thermal energy density
+       NB: this will need to be modified for radiation
+    """
+    return (data.pf["Gamma"] - 1.0)*data["ThermalEnergy"]
+add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
+
+def _Temperature(field, data):
+    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
+            data["ThermalEnergy"] / (kboltz * data["Density"]))
+add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
+          take_log=False)
+
+# particle fields
+
+def particle_func(p_field, dtype='float64'):
+    def _Particles(field, data):
+        io = data.hierarchy.io
+        if not data.NumberOfParticles > 0:
+            return np.array([], dtype=dtype)
+        else:
+            return io._read_particles(data, p_field).astype(dtype)
+
+    return _Particles
+
+_particle_field_list = ["mass", 
+                        "position_x",
+                        "position_y",
+                        "position_z",
+                        "momentum_x",
+                        "momentum_y",
+                        "momentum_z",
+                        "angmomen_x",
+                        "angmomen_y",
+                        "angmomen_z",
+                        "mlast",
+                        "r",
+                        "mdeut",
+                        "n",
+                        "mdot",
+                        "burnstate",
+                        "luminosity",
+                        "id"]
+
+for pf in _particle_field_list:
+    pfunc = particle_func("particle_%s" % (pf))
+    add_field("particle_%s" % pf, function=pfunc,
+              validators = [ValidateSpatial(0)],
+              particle_type=True)
+
+def _ParticleMass(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles
+
+def _ParticleMassMsun(field, data):
+    particles = data["particle_mass"].astype('float64')
+    return particles/1.989e33
+
+add_field("ParticleMass",
+          function=_ParticleMass, validators=[ValidateSpatial(0)],
+          particle_type=True)
+add_field("ParticleMassMsun",
+          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
+          particle_type=True)

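Note: the derived fields above walk from the conserved variables Orion
stores (density, momenta, eden) back to primitives: v = mom/rho,
e_th = E - rho v^2 / 2, P = (Gamma - 1) e_th, and
T = (Gamma - 1) mu m_H e_th / (k_B rho). A sketch of that chain on plain
numpy arrays for a single cell, with assumed values for Gamma and mu that
yt would normally read from the parameter file:

    import numpy as np

    gamma, mu = 5.0 / 3.0, 0.6       # assumed; taken from the pf in yt
    mh, kboltz = 1.67e-24, 1.38e-16  # cgs values of m_H and k_B
    rho  = np.array([1.0e-24])       # density, g/cm^3
    xmom = np.array([1.0e-18])       # x-momentum, g/cm^2/s
    eden = np.array([1.0e-12])       # total energy density, erg/cm^3

    vx   = xmom / rho                # x-velocity, cm/s
    e_th = eden - 0.5 * rho * vx**2  # ThermalEnergy (1-d motion), erg/cm^3
    p    = (gamma - 1.0) * e_th      # Pressure, dyne/cm^2
    temp = (gamma - 1.0) * mu * mh * e_th / (kboltz * rho)  # Temperature, K
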
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/io.py
--- /dev/null
+++ b/yt/frontends/boxlib/io.py
@@ -0,0 +1,195 @@
+"""
+Orion data-file handling functions
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import os
+import numpy as np
+from yt.utilities.lib import \
+    read_castro_particles, \
+    read_and_seek
+from yt.utilities.io_handler import \
+           BaseIOHandler
+from yt.funcs import mylog, defaultdict
+
+class IOHandlerBoxlib(BaseIOHandler):
+
+    _data_style = "boxlib_native"
+
+    def __init__(self, pf, *args, **kwargs):
+        self.pf = pf
+
+    def _read_fluid_selection(self, chunks, selector, fields, size):
+        chunks = list(chunks)
+        if any((ftype != "gas" for ftype, fname in fields)):
+            raise NotImplementedError
+        rv = {}
+        for field in fields:
+            rv[field] = np.empty(size, dtype="float64")
+        ng = sum(len(c.objs) for c in chunks)
+        mylog.debug("Reading %s cells of %s fields in %s grids",
+                    size, [f2 for f1, f2 in fields], ng)
+        ind = 0
+        for chunk in chunks:
+            data = self._read_chunk_data(chunk, fields)
+            for g in chunk.objs:
+                for field in fields:
+                    ftype, fname = field
+                    ds = data[g.id].pop(fname)
+                    nd = g.select(selector, ds, rv[field], ind) # caches
+                ind += nd
+                data.pop(g.id)
+        return rv
+
+    def _read_chunk_data(self, chunk, fields):
+        data = {}
+        grids_by_file = defaultdict(list)
+        if len(chunk.objs) == 0: return data
+        for g in chunk.objs:
+            if g.filename is None:
+                continue
+            grids_by_file[g.filename].append(g)
+        dtype = self.pf.hierarchy._dtype
+        bpr = dtype.itemsize
+        field_list = set(f[1] for f in fields)
+        for filename in grids_by_file:
+            grids = grids_by_file[filename]
+            grids.sort(key = lambda a: a._offset)
+            f = open(filename, "rb")
+            for grid in grids:
+                data[grid.id] = {}
+                grid._seek(f)
+                count = grid.ActiveDimensions.prod()
+                size = count * bpr
+                for field in self.pf.hierarchy.field_order:
+                    if field in field_list:
+                        # We read it ...
+                        v = np.fromfile(f, dtype=dtype, count=count)
+                        v = v.reshape(grid.ActiveDimensions, order='F')
+                        data[grid.id][field] = v
+                    else:
+                        f.seek(size, os.SEEK_CUR)
+        return data
+
+class IOHandlerOrion(IOHandlerBoxlib):
+    _data_style = "orion_native"
+
+    def _read_particles(self, grid, field): 
+        """
+        parses the Orion Star Particle text files
+        
+        """
+
+        fn = grid.pf.fullplotdir + "/StarParticles"
+        if not os.path.exists(fn):
+            fn = grid.pf.fullplotdir + "/SinkParticles"
+
+        # Figure out the format of the particle file
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+        line = lines[1]
+        
+        # The basic fields that all sink particles have
+        index = {'particle_mass': 0,
+                 'particle_position_x': 1,
+                 'particle_position_y': 2,
+                 'particle_position_z': 3,
+                 'particle_momentum_x': 4,
+                 'particle_momentum_y': 5,
+                 'particle_momentum_z': 6,
+                 'particle_angmomen_x': 7,
+                 'particle_angmomen_y': 8,
+                 'particle_angmomen_z': 9,
+                 'particle_mlast': 10,
+                 'particle_mdeut': 11,
+                 'particle_n': 12,
+                 'particle_mdot': 13,
+                 'particle_burnstate': 14,
+                 'particle_id': 15}
+
+        if len(line.strip().split()) == 11:
+            # these are vanilla sinks, do nothing
+            pass  
+
+        elif len(line.strip().split()) == 17:
+            # these are old-style stars, add stellar model parameters
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+
+        elif len(line.strip().split()) == 18:
+            # these are the newer style, add luminosity as well
+            index['particle_mlast']     = 10
+            index['particle_r']         = 11
+            index['particle_mdeut']     = 12
+            index['particle_n']         = 13
+            index['particle_mdot']      = 14
+            index['particle_burnstate'] = 15
+            index['particle_luminosity']= 16
+
+        else:
+            # give a warning if none of the above apply:
+            mylog.warning('Could not determine the particle output file '
+                          'format; these results could be nonsense!')
+
+        def read(line, field):
+            return float(line.strip().split(' ')[index[field]])
+
+        with open(fn, 'r') as f:
+            lines = f.readlines()
+            particles = []
+            for line in lines[1:]:
+                if grid.NumberOfParticles > 0:
+                    coord = read(line, "particle_position_x"), \
+                            read(line, "particle_position_y"), \
+                            read(line, "particle_position_z") 
+                    if ( (grid.LeftEdge < coord).all() and 
+                         (coord <= grid.RightEdge).all() ):
+                        particles.append(read(line, field))
+        return np.array(particles)
+
+class IOHandlerCastro(IOHandlerBoxlib):
+    _data_style = "castro_native"
+
+    def _read_particle_field(self, grid, field):
+        # NOTE: castro_particle_field_names still needs to be imported or
+        # defined here; it previously lived in the removed castro frontend.
+        off = grid._particle_offset
+        filen = os.path.expanduser(grid.particle_filename)
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+            castro_particle_field_names.index(field),
+            len(castro_particle_field_names),
+            tr)
+        return tr
+
+nyx_particle_field_names = ['particle_position_%s' % ax for ax in 'xyz'] + \
+                           ['particle_mass'] +  \
+                           ['particle_velocity_%s' % ax for ax in 'xyz']
+
+class IOHandlerNyx(IOHandlerBoxlib):
+    _data_style = "nyx_native"
+
+    def _read_particle_coords(self, chunks, ptf):
+        # Stub: this body is carried over from the old per-grid API and
+        # still references per-grid state (grid, field) that the chunked
+        # API does not supply; it needs to be ported.
+        off = grid._particle_offset
+        filen = os.path.expanduser(grid.particle_filename)
+        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
+        read_castro_particles(filen, off,
+                            nyx_particle_field_names.index(field),
+                            len(nyx_particle_field_names), tr)
+
+    def _read_particle_fields(self, chunks, ptf, fields):
+        pass

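Note: _read_chunk_data assumes a simple on-disk layout: within a data file,
each grid's fields are stored back to back in hierarchy.field_order, each
field occupying ActiveDimensions.prod() values in Fortran order, starting at
the grid's _offset; unwanted fields are skipped with a relative seek rather
than read. A sketch of reading a single field under those assumptions;
read_one_field is a hypothetical helper, not part of this commit:

    import os
    import numpy as np

    def read_one_field(f, dims, field_order, wanted,
                       dtype=np.dtype("float64")):
        # f must already be positioned at the grid's offset (grid._seek(f)).
        count = int(np.prod(dims))
        for field in field_order:
            if field == wanted:
                return np.fromfile(f, dtype=dtype,
                                   count=count).reshape(dims, order="F")
            f.seek(count * dtype.itemsize, os.SEEK_CUR)  # skip this field
        raise KeyError(wanted)
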
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/setup.py
--- /dev/null
+++ b/yt/frontends/boxlib/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('boxlib', parent_package, top_path)
+    config.make_config_py()  # installs __config__.py
+    #config.make_svn_version_py()
+    return config

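Note: each frontend's configuration() is collected by the parent package via
numpy.distutils, so the name passed to Configuration must match the
subpackage directory ('boxlib' here). The parent file is not shown in this
diff; its hookup looks roughly like this sketch:

    # In yt/frontends/setup.py (not shown in this diff), something like:
    def configuration(parent_package='', top_path=None):
        from numpy.distutils.misc_util import Configuration
        config = Configuration('frontends', parent_package, top_path)
        config.add_subpackage("boxlib")  # picks up boxlib/setup.py above
        return config
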
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/boxlib/tests/test_orion.py
--- /dev/null
+++ b/yt/frontends/boxlib/tests/test_orion.py
@@ -0,0 +1,42 @@
+"""
+Orion frontend tests
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.testing import *
+from yt.utilities.answer_testing.framework import \
+    requires_pf, \
+    small_patch_amr, \
+    big_patch_amr, \
+    data_dir_load
+from yt.frontends.boxlib.api import OrionStaticOutput
+
+_fields = ("Temperature", "Density", "VelocityMagnitude", "DivV")
+
+radadvect = "RadAdvect/plt00000"
+@requires_pf(radadvect)
+def test_radadvect():
+    pf = data_dir_load(radadvect)
+    yield assert_equal, str(pf), "plt00000"
+    for test in small_patch_amr(radadvect, _fields):
+        test_radadvect.__name__ = test.description
+        yield test
+
+rt = "RadTube/plt00500"
+@requires_pf(rt)
+def test_radtube():
+    pf = data_dir_load(rt)
+    yield assert_equal, str(pf), "plt00500"
+    for test in small_patch_amr(rt, _fields):
+        test_radtube.__name__ = test.description
+        yield test

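Note: these are nose-style yield tests, and the requires_pf decorator skips
each test unless the named dataset is available in the configured test data
directory. Loading one of the plotfiles by hand looks roughly like this
sketch, assuming RadAdvect/plt00000 is on disk:

    from yt.frontends.boxlib.api import OrionStaticOutput
    pf = OrionStaticOutput("RadAdvect/plt00000")
    print pf.h.field_list   # Python 2 print, as used throughout this codebase
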
diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/castro/__init__.py
--- a/yt/frontends/castro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r cebc11653e4ba141fa3972848917f65265058f6e -r d506268450c5df3a8d6c4dbf8d0ad0bdb0bd588c yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CastroGrid, \
-      CastroHierarchy, \
-      CastroStaticOutput
-
-from .fields import \
-      CastroFieldInfo, \
-      add_castro_field
-
-from .io import \
-      IOHandlerNative

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/

--

This is a commit notification from bitbucket.org. You are receiving it
because you have the commit-notification service enabled and are the
addressed recipient of this email.


