[yt-svn] commit/yt: 4 new changesets

commits-noreply at bitbucket.org
Thu Nov 14 06:26:21 PST 2013


4 new commits in yt:

https://bitbucket.org/yt_analysis/yt/commits/6a26fa15225b/
Changeset:   6a26fa15225b
Branch:      yt
User:        jzuhone
Date:        2013-11-13 18:05:11
Summary:     RA and DEC need to be reversed
Affected #:  1 file

diff -r 718059a11627949ecdbafbefed67d699c9a04e66 -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -948,9 +948,9 @@
         col1 = pyfits.Column(name='ENERGY', format='E',
                              array=self["eobs"])
         col2 = pyfits.Column(name='DEC', format='D',
+                             array=self["ysky"])
+        col3 = pyfits.Column(name='RA', format='D',
                              array=self["xsky"])
-        col3 = pyfits.Column(name='RA', format='D',
-                             array=self["ysky"])
 
         coldefs = pyfits.ColDefs([col1, col2, col3])
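
For reference, a minimal sketch of the corrected column layout, assuming the
legacy pyfits API (astropy.io.fits exposes the same Column/ColDefs interface);
the sample arrays are illustrative stand-ins for self["eobs"], self["xsky"],
and self["ysky"]:

    import numpy as np
    import pyfits  # legacy API; astropy.io.fits in newer installs

    eobs = np.array([1.2, 3.4])    # observed energies (stand-in for self["eobs"])
    xsky = np.array([30.0, 30.1])  # RA values (stand-in for self["xsky"])
    ysky = np.array([45.0, 45.2])  # DEC values (stand-in for self["ysky"])

    col1 = pyfits.Column(name='ENERGY', format='E', array=eobs)
    # After this fix, DEC is filled from ysky and RA from xsky:
    col2 = pyfits.Column(name='DEC', format='D', array=ysky)
    col3 = pyfits.Column(name='RA', format='D', array=xsky)
    coldefs = pyfits.ColDefs([col1, col2, col3])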
 


https://bitbucket.org/yt_analysis/yt/commits/1ef19ed553b4/
Changeset:   1ef19ed553b4
Branch:      yt
User:        jzuhone
Date:        2013-11-13 18:12:15
Summary:     Merged yt_analysis/yt into yt
Affected #:  35 files

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
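
A hedged usage sketch of the equal-redshift branch added above; the parameter
file name and simulation type are placeholders, and the import path assumes
the yt-2.x layout:

    from yt.analysis_modules.cosmological_observation.api import \
        CosmologySplice

    splicer = CosmologySplice("simulation.par", simulation_type="Enzo")
    # With near_redshift == far_redshift, the new branch returns a
    # one-element splice pointing at the single matching dataset.
    splice = splicer.create_cosmology_splice(near_redshift=0.5,
                                             far_redshift=0.5)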
 

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either 
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
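
A hedged sketch of the single-dataset mode introduced here (the parameter
file, simulation type, field names, and output file are placeholders):
setting near_redshift equal to far_redshift selects this path, and
start_position must be paired with exactly one of end_position or trajectory:

    from yt.analysis_modules.cosmological_observation.api import LightRay

    lr = LightRay("simulation.par", simulation_type="Enzo",
                  near_redshift=0.0, far_redshift=0.0)
    lr.make_light_ray(start_position=[0.1, 0.1, 0.1],
                      end_position=[0.9, 0.9, 0.9],
                      fields=['Temperature', 'Density'],
                      data_filename='ray.h5')

In this mode the segment's far redshift is derived from the Hubble flow
across the ray via the relativistic Doppler relation
1 + z = sqrt((1 + v/c) / (1 - v/c)).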
 

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold011',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )
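
For reference, the new default can be inspected through yt's configuration
object (a sketch assuming the yt-2.x ytcfg interface):

    from yt.config import ytcfg

    # Answer tests now compare against the 'gold011' standard by default.
    print ytcfg.get("yt", "gold_standard_filename")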

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -874,7 +874,8 @@
         pw.set_axes_unit(axes_unit)
         return pw
 
-    def to_frb(self, width, resolution, center=None, height=None):
+    def to_frb(self, width, resolution, center=None, height=None,
+               periodic = False):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -899,6 +900,9 @@
         center : array-like of floats, optional
             The center of the FRB.  If not specified, defaults to the center of
             the current object.
+        periodic : bool
+            Should the returned Fixed Resolution Buffer be periodic?  (default:
+            False).
 
         Returns
         -------
@@ -932,7 +936,8 @@
         yax = y_dict[self.axis]
         bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
                   center[yax] - height*0.5, center[yax] + height*0.5)
-        frb = FixedResolutionBuffer(self, bounds, resolution)
+        frb = FixedResolutionBuffer(self, bounds, resolution,
+                                    periodic = periodic)
         return frb
 
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
@@ -1831,9 +1836,9 @@
         # It is probably faster, as it consolidates IO, but if we did it in
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
-            dependencies = self.get_dependencies(fields, ghost_zones = False)
+            dependencies = self.get_dependencies(fields)
             mylog.debug("Preloading %s grids and getting %s",
-                            len(self.source._get_grid_objs()),
+                            len([g for g in self.source._get_grid_objs()]),
                             dependencies)
             self.comm.preload([g for g in self._get_grid_objs()],
                           dependencies, self.hierarchy.io)
@@ -1950,7 +1955,7 @@
         grids_to_initialize = [g for g in self._grids if (g.Level == level)]
         zero_out = (level != self._max_level)
         if len(grids_to_initialize) == 0: return
-        pbar = get_pbar('Initializing tree % 2i / % 2i' \
+        pbar = get_pbar('Initializing tree % 2i / % 2i ' \
                           % (level, self._max_level), len(grids_to_initialize))
         start_index = np.empty(2, dtype="int64")
         dims = np.empty(2, dtype="int64")
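
A brief sketch of the new periodic keyword on to_frb (the dataset path is a
placeholder):

    from yt.mods import load

    pf = load("galaxy0030/galaxy0030")  # placeholder dataset
    slc = pf.h.slice(2, 0.5)            # slice along z through the midplane
    # periodic=True asks the FixedResolutionBuffer to wrap at domain
    # edges instead of clipping at them.
    frb = slc.to_frb((0.5, 'unitary'), 512, periodic=True)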

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/__init__.py
--- a/yt/frontends/castro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CastroGrid, \
-      CastroHierarchy, \
-      CastroStaticOutput
-
-from .fields import \
-      CastroFieldInfo, \
-      add_castro_field
-
-from .io import \
-      IOHandlerNative

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""
-Data structures for Castro.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import re
-import os
-import weakref
-import itertools
-from collections import defaultdict
-from string import strip, rstrip
-from stat import ST_CTIME
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
-from yt.data_objects.grid_patch import AMRGridPatch
-from yt.data_objects.hierarchy import AMRHierarchy
-from yt.data_objects.static_output import StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.lib import get_box_grids_level
-
-from .definitions import \
-    castro2enzoDict, \
-    parameterDict, \
-    yt2castroFieldsDict, \
-    castro_FAB_header_pattern, \
-    castro_particle_field_names, \
-    boxlib_bool_to_int
-from .fields import \
-    CastroFieldInfo, \
-    KnownCastroFields, \
-    add_castro_field
-
-
-class CastroGrid(AMRGridPatch):
-    _id_offset = 0
-
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
-                 dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(index, **kwargs)
-        self.filename = filename
-        self._offset = offset
-        self._paranoid = paranoia  # TODO: Factor this behavior out in tests
-
-        ### TODO: error check this (test)
-        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
-        self.LeftEdge  = LeftEdge.copy()
-        self.RightEdge = RightEdge.copy()
-        self.index = index
-        self.Level = level
-
-    def get_global_startindex(self):
-        return self.start_index
-
-    def _prepare_grid(self):
-        """ Copies all the appropriate attributes from the hierarchy. """
-        # This is definitely the slowest part of generating the hierarchy.
-        # Now we give it pointers to all of its attributes
-        # Note that to keep in line with Enzo, we have broken PEP-8
-
-        h = self.hierarchy # cache it
-        #self.StartIndices = h.gridStartIndices[self.id]
-        #self.EndIndices = h.gridEndIndices[self.id]
-        h.grid_levels[self.id,0] = self.Level
-        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
-        h.grid_right_edge[self.id,:] = self.RightEdge[:]
-        #self.Time = h.gridTimes[self.id,0]
-        #self.NumberOfParticles = h.gridNumberOfParticles[self.id,0]
-        self.field_indexes = h.field_indexes
-        self.Children = h.gridTree[self.id]
-        pIDs = h.gridReverseTree[self.id]
-
-        if len(pIDs) > 0:
-            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
-        else:
-            self.Parent = None
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        dx = self.hierarchy.grid_dxs[self.index][0]
-        dy = self.hierarchy.grid_dys[self.index][0]
-        dz = self.hierarchy.grid_dzs[self.index][0]
-        self.dds = np.array([dx, dy, dz])
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    def __repr__(self):
-        return "CastroGrid_%04i" % (self.id)
-
-class CastroHierarchy(AMRHierarchy):
-    grid = CastroGrid
-
-    def __init__(self, pf, data_style='castro_native'):
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        header_filename = os.path.join(pf.fullplotdir, 'Header')
-        self.directory = pf.fullpath
-        self.data_style = data_style
-
-        # This also sets up the grid objects
-        self.read_global_header(header_filename,
-                                self.parameter_file.paranoid_read) 
-        self.read_particle_header()
-        self._cache_endianness(self.levels[-1].grids[-1])
-
-        super(CastroHierarchy, self).__init__(pf, data_style)
-        self._setup_data_io()
-        self._setup_field_list()
-        self._populate_hierarchy()
-
-    def read_global_header(self, filename, paranoid_read):
-        """ Read the global header file for an Castro plotfile output. """
-        counter = 0
-        header_file = open(filename, 'r')
-        self._global_header_lines = header_file.readlines()
-
-        # parse the file
-        self.castro_version = self._global_header_lines[0].rstrip()
-        self.n_fields = int(self._global_header_lines[1])
-
-        counter = self.n_fields + 2
-        self.field_list = []
-        for i, line in enumerate(self._global_header_lines[2:counter]):
-            self.field_list.append(line.rstrip())
-
-        # this is unused...eliminate it?
-        #for f in self.field_indexes:
-        #    self.field_list.append(castro2ytFieldsDict.get(f, f))
-
-        self.dimension = int(self._global_header_lines[counter])
-        if self.dimension != 3:
-            raise RunTimeError("Castro must be in 3D to use yt.")
-
-        counter += 1
-        self.Time = float(self._global_header_lines[counter])
-        counter += 1
-        self.finest_grid_level = int(self._global_header_lines[counter])
-        self.n_levels = self.finest_grid_level + 1
-        counter += 1
-
-        # quantities with _unnecessary are also stored in the inputs
-        # file and are not needed.  they are read in and stored in
-        # case in the future we want to enable a "backwards" way of
-        # taking the data out of the Header file and using it to fill
-        # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #np.array(map(int, self._global_header_lines[counter].split()))
-        counter += 1
-        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
-        #domain_re.search(self._global_header_lines[counter]).groups()
-        counter += 1
-        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
-        counter += 1
-
-        self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = np.array(map(float, line.split()))
-        counter += self.n_levels
-        self.geometry = int(self._global_header_lines[counter])
-        if self.geometry != 0:
-            raise RunTimeError("yt only supports cartesian coordinates.")
-        counter += 1
-
-        # this is just to debug. eventually it should go away.
-        linebreak = int(self._global_header_lines[counter])
-        if linebreak != 0:
-            raise RunTimeError("INTERNAL ERROR! Header is unexpected size")
-        counter += 1
-
-        # Each level is one group with ngrids on it. each grid has 3 lines of 2 reals
-        # BoxLib madness
-        self.levels = []
-        grid_counter = 0
-        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
-        re_file_finder = re.compile(file_finder_pattern)
-        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
-        re_dim_finder = re.compile(dim_finder_pattern)
-        data_files_pattern = r"Level_[\d]/"
-        data_files_finder = re.compile(data_files_pattern)
-
-        for level in range(0, self.n_levels):
-            tmp = self._global_header_lines[counter].split()
-            # Should this be grid_time or level_time??
-            lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
-            counter += 1
-            nsteps = int(self._global_header_lines[counter])
-            counter += 1
-            self.levels.append(CastroLevel(lev, ngrids))
-            # Open level header, extract file names and offsets for each grid.
-            # Read slightly out of order here: at the end of the lo, hi pairs
-            # for x, y, z is a *list* of files types in the Level directory. 
-            # Each type has Header and a number of data files
-            # (one per processor)
-            tmp_offset = counter + 3*ngrids
-            nfiles = 0
-            key_off = 0
-            files =   {} # dict(map(lambda a: (a,[]), self.field_list))
-            offsets = {} # dict(map(lambda a: (a,[]), self.field_list))
-
-            while (nfiles + tmp_offset < len(self._global_header_lines) and
-                   data_files_finder.match(self._global_header_lines[nfiles+tmp_offset])):
-                filen = os.path.join(self.parameter_file.fullplotdir,
-                                     self._global_header_lines[nfiles+tmp_offset].strip())
-                # open each "_H" header file, and get the number of
-                # components within it
-                level_header_file = open(filen+'_H','r').read()
-                start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
-                grid_file_offset = re_file_finder.findall(level_header_file)
-                ncomp_this_file = int(level_header_file.split('\n')[2])
-
-                for i in range(ncomp_this_file):
-                    key = self.field_list[i+key_off]
-                    f, o = zip(*grid_file_offset)
-                    files[key] = f
-                    offsets[key] = o
-                    self.field_indexes[key] = i
-
-                key_off += ncomp_this_file
-                nfiles += 1
-
-            # convert dict of lists to list of dicts
-            fn = []
-            off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,
-                                     'Level_%i' % level)
-            for i in range(ngrids):
-                fi = [os.path.join(lead_path, files[key][i]) for key in self.field_list]
-                of = [int(offsets[key][i]) for key in self.field_list]
-                fn.append(dict(zip(self.field_list, fi)))
-                off.append(dict(zip(self.field_list, of)))
-
-            for grid in range(0, ngrids):
-                gfn = fn[grid]  # filename of file containing this grid
-                gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                ylo, yhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                zlo, zhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                lo = np.array([xlo, ylo, zlo])
-                hi = np.array([xhi, yhi, zhi])
-                dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
-                                                       level, gfn, gfo, dims,
-                                                       start, stop,
-                                                       paranoia=paranoid_read,  ### TODO: at least the code isn't schizophrenic paranoid
-                                                       hierarchy=self))
-                grid_counter += 1   # this is global, and shouldn't be reset
-                                    # for each level
-
-            # already read the filenames above...
-            counter += nfiles
-            self.num_grids = grid_counter
-            self.float_type = 'float64'
-
-        self.maxLevel = self.n_levels - 1
-        self.max_level = self.n_levels - 1
-        header_file.close()
-
-    def read_particle_header(self):
-        # We need to get particle offsets and particle counts
-        if not self.parameter_file.use_particles:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-
-        self.field_list += castro_particle_field_names[:]
-        header = open(os.path.join(self.parameter_file.fullplotdir, "DM",
-                                   "Header"))
-        version = header.readline()
-        ndim = header.readline()
-        nfields = header.readline()
-        ntotalpart = int(header.readline())
-        dummy = header.readline() # nextid
-        maxlevel = int(header.readline()) # max level
-
-        # Skip over how many grids on each level; this is degenerate
-        for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = np.fromiter((int(i)
-                                 for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        self.pgrid_info = grid_info
-
-    def _cache_endianness(self, test_grid):
-        """
-        Cache the endianness and bytes perreal of the grids by using a test grid
-        and assuming that all grids have the same endianness. This is a pretty
-        safe assumption since Castro uses one file per processor, and if you're
-        running on a cluster with different endian processors, then you're on
-        your own!
-
-        """
-        # open the test file and grab the header
-        in_file = open(os.path.expanduser(test_grid.filename[self.field_list[0]]), 'rb')
-        header = in_file.readline()
-        in_file.close()
-        header.strip()
-        # Parse it. The pattern is in castro.definitions.py
-        header_re = re.compile(castro_FAB_header_pattern)
-        bytes_per_real, endian, start, stop, centerType, n_components = header_re.search(header).groups()
-        self._bytes_per_real = int(bytes_per_real)
-        if self._bytes_per_real == int(endian[0]):
-            dtype = '<'
-        elif self._bytes_per_real == int(endian[-1]):
-            dtype = '>'
-        else:
-            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-        dtype += ('f%i' % self._bytes_per_real) # always a floating point
-        self._dtype = dtype
-
-    def _calculate_grid_dimensions(self, start_stop):
-        start = np.array(map(int, start_stop[0].split(',')))
-        stop = np.array(map(int, start_stop[1].split(',')))
-        dimension = stop - start + 1
-        return dimension, start, stop
-
-    def _populate_grid_objects(self):
-        mylog.debug("Creating grid objects")
-
-        self.grids = np.concatenate([level.grids for level in self.levels])
-        basedir = self.parameter_file.fullplotdir
-
-        for g, pg in itertools.izip(self.grids, self.pgrid_info):
-            g.particle_filename = os.path.join(
-                basedir, "DM", "Level_%s" % (g.Level), "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:,0] = self.pgrid_info[:,1]
-        del self.pgrid_info
-
-        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
-        self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
-                                  for level in self.levels], axis=0)
-
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
-
-        left_edges = []
-        right_edges = []
-        dims = []
-        for level in self.levels:
-            left_edges += [g.LeftEdge for g in level.grids]
-            right_edges += [g.RightEdge for g in level.grids]
-            dims += [g.ActiveDimensions for g in level.grids]
-
-        self.grid_left_edge = np.array(left_edges)
-        self.grid_right_edge = np.array(right_edges)
-        self.grid_dimensions = np.array(dims)
-        self.gridReverseTree = [] * self.num_grids
-        self.gridReverseTree = [ [] for i in range(self.num_grids)]
-        self.gridTree = [ [] for i in range(self.num_grids)]
-
-        mylog.debug("Done creating grid objects")
-
-    def _populate_hierarchy(self):
-        self._setup_grid_tree()
-        #self._setup_grid_corners()
-
-        for i, grid in enumerate(self.grids):
-            if (i % 1e4) == 0:
-                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
-
-            grid._prepare_grid()
-            grid._setup_dx()
-
-    def _setup_grid_tree(self):
-        mask = np.empty(self.grids.size, dtype='int32')
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            children = self.grids[mask.astype("bool")]
-            #assert(len(children) == len(self._get_grid_children(grid)))
-            for child in children:
-                self.gridReverseTree[child.id].append(i)
-                self.gridTree[i].append(weakref.proxy(child))
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        dd["field_indexes"] = self.field_indexes
-        AMRHierarchy._setup_classes(self, dd)
-        #self._add_object_class('grid', "CastroGrid", CastroGridBase, dd)
-        self.object_types.sort()
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return self.grids[mask]
-
-    def _setup_field_list(self):
-        if self.parameter_file.use_particles:
-            # We know which particle fields will exist -- pending further
-            # changes in the future.
-            for field in castro_particle_field_names:
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-                # Note that we call add_castro_field on the field_info directly.  This
-                # will allow the same field detection mechanism to work for 1D, 2D
-                # and 3D fields.
-                self.pf.field_info.add_castro_field(
-                        field, lambda a, b: None,
-                        convert_function=cf, take_log=False,
-                        particle_type=True)
-
-    ### TODO: check if this can be removed completely
-    def _count_grids(self):
-        """
-        this is already provided in ???
-
-        """
-        pass
-
-    def _initialize_grid_arrays(self):
-        mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
-
-    def _parse_hierarchy(self):
-        pass
-
-    def _detect_fields(self):
-        pass
-
-    def _setup_derived_fields(self):
-        pass
-
-    def _initialize_state_variables(self):
-        """override to not re-initialize num_grids in AMRHierarchy.__init__
-
-        """
-        self._parallel_locking = False
-        self._data_file = None
-        self._data_mode = None
-        self._max_locations = {}
-
-class CastroLevel:
-    def __init__(self, level, ngrids):
-        self.level = level
-        self.ngrids = ngrids
-        self.grids = []
-
-class CastroStaticOutput(StaticOutput):
-    """
-    This class is a stripped down class that simply reads and parses *filename*,
-    without looking at the Castro hierarchy.
-
-    """
-    _hierarchy_class = CastroHierarchy
-    _fieldinfo_fallback = CastroFieldInfo
-    _fieldinfo_known = KnownCastroFields
-
-    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
-                 data_style='castro_native', paranoia=False,
-                 storage_filename = None):
-        """
-        Need to override for Castro file structure.
-
-        the paramfile is usually called "inputs"
-        and there may be a fortran inputs file usually called "probin"
-        plotname here will be a directory name
-        as per BoxLib, data_style will be one of
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
-        """
-        self.storage_filename = storage_filename
-        self.paranoid_read = paranoia
-        self.parameter_filename = paramFilename
-        self.fparameter_filename = fparamFilename
-        self.__ipfn = paramFilename
-        self.fparameters = {}
-        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
-                                                 data_style='castro_native')
-
-
-        # These should maybe not be hardcoded?
-        ### TODO: this.
-        self.parameters["HydroMethod"] = 'castro' # always PPM DE
-        self.parameters["Time"] = 1.0 # default unit is 1...
-        self.parameters["DualEnergyFormalism"] = 0 # always off.
-        self.parameters["EOSType"] = -1 # default
-
-        if self.fparameters.has_key("mu"):
-            self.parameters["mu"] = self.fparameters["mu"]
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        pname = args[0].rstrip("/")
-        dn = os.path.dirname(pname)
-        if len(args) > 1:
-            kwargs['paramFilename'] = args[1]
-
-        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
-
-        # We check for the job_info file's existence because this is currently
-        # what distinguishes Castro data from MAESTRO data.
-        ### ^ that is nuts
-        pfn = os.path.join(pfname)
-        if not os.path.exists(pfn):
-            return False
-        castro = any(("castro." in line for line in open(pfn)))
-        nyx = any(("nyx." in line for line in open(pfn)))
-        castro = castro and (not nyx) # it's only castro if it's not nyx
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not castro) and (not maestro)
-        return castro
-
-    def _parse_parameter_file(self):
-        """
-        Parses the parameter file and establishes the various dictionaries.
-
-        """
-        # Boxlib madness
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
-        self._parse_header_file()
-        self.parameter_filename = self._localize(self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(self.fparameter_filename, 'probin')
-        if os.path.isfile(self.fparameter_filename):
-            self._parse_fparameter_file()
-            for param in self.fparameters:
-                if castro2enzoDict.has_key(param):
-                    self.parameters[castro2enzoDict[param]] = self.fparameters[param]
-
-        # Let's read the file
-        self.unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.parameter_filename).readlines()
-        self.use_particles = False
-
-        for line in lines:
-            if line.find("#") >= 1: # Keep the commented lines...
-                line = line[:line.find("#")]
-            line = line.strip().rstrip()
-            if len(line) < 2 or line.find("#") == 0: # ...but skip comments
-                continue
-
-            try:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-
-            if castro2enzoDict.has_key(param):
-                paramName = castro2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
-            elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("particles.write_in_plotfile"):
-                self.use_particles = boxlib_bool_to_int(vals)
-            self.fparameters[param] = vals
-
-        self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
-        self.dimensionality = self.parameters["TopGridRank"]
-        self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
-        self.refine_by = self.parameters.get("RefineBy", 2)
-
-        if (self.parameters.has_key("ComovingCoordinates") and
-            bool(self.parameters["ComovingCoordinates"])):
-            self.cosmological_simulation = 1
-            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
-            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
-            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
-
-            # Stupid that we have to read a separate file for this :/
-            a_file = open(os.path.join(self.fullplotdir, "comoving_a"))
-            line = a_file.readline().strip()
-            a_file.close()
-
-            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
-            self.cosmological_scale_factor = float(line)
-            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
-        else:
-            ### TODO: make these defaults automatic
-            self.current_redshift = self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
-
-    def _parse_fparameter_file(self):
-        """
-        Parses the fortran parameter file for Castro. Most of this will be
-        useless, but this is where it keeps mu = mass per particle/m_hydrogen.
-
-        """
-        lines = open(self.fparameter_filename).readlines()
-        for line in lines:
-            if line.count("=") == 1:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0 and vals.count("\"") == 0:
-                    try:
-                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
-                    except ValueError:
-                        print "Failed on line", line
-                else:
-                    t = vals.split()
-                if len(t) == 1:
-                    self.fparameters[param] = t[0]
-                else:
-                    self.fparameters[param] = t
-
-    def _parse_header_file(self):
-        """
-        Parses the BoxLib header file to get any parameters stored there.
-        Hierarchy information is read out of this file in CastroHierarchy. 
-
-        Currently, only Time is read here.
-
-        """
-        header_file = open(os.path.join(self.fullplotdir, "Header"))
-        lines = header_file.readlines()
-        header_file.close()
-        n_fields = int(lines[1])
-        self.current_time = float(lines[3 + n_fields])
-
-    def _set_units(self):
-        """
-        Generates the conversion to various physical _units based on the
-        parameter file.
-
-        """
-        self.units = {}
-        self.time_units = {}
-
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-
-        if self.cosmological_simulation:
-            cf = 1e5 * self.cosmological_scale_factor   # Where does the 1e5 come from?
-            for ax in 'xyz':
-                self.units['particle_velocity_%s' % ax] = cf
-            self.units['particle_mass'] = 1.989e33  ### TODO: Make a global solar mass def
-
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units. Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2castroFieldsDict:
-            self.conversion_factors[key] = 1.0
-        for key in castro_particle_field_names:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/definitions.py
--- a/yt/frontends/castro/definitions.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.funcs import *
-
-def boxlib_bool_to_int(v):
-    try:
-        return int(v)
-    except ValueError:
-        pass
-    v = v.upper().strip()
-    if v[0] == 'T':
-        return 1
-    elif v[0] == 'F':
-        return 0
-
-# TODO: get rid of enzo parameters we do not need
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": boxlib_bool_to_int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                }
-
-# converts the Castro inputs file name to the Enzo/yt name expected
-# throughout the code. key is Castro name, value is Enzo/yt equivalent
-castro2enzoDict = {"amr.n_cell": "TopGridDimensions",
-                  "materials.gamma": "Gamma",
-                  "amr.ref_ratio": "RefineBy",
-                  "castro.use_comoving": "ComovingCoordinates",
-                  "castro.redshift_in": "CosmologyInitialRedshift",
-                  "comoving_OmL": "CosmologyOmegaLambdaNow",
-                  "comoving_OmM": "CosmologyOmegaMatterNow",
-                  "comoving_h": "CosmologyHubbleConstantNow"
-                  }
-
-yt2castroFieldsDict = {}
-castro2ytFieldsDict = {}
-
-castro_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"
-
-castro_particle_field_names = \
-    ['particle_position_%s' % ax for ax in 'xyz'] + \
-    ['particle_mass'] +  \
-    ['particle_velocity_%s' % ax for ax in 'xyz']

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/fields.py
--- a/yt/frontends/castro/fields.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""
-Castro-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    TranslationFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import mh, kboltz
-
-translation_dict = {
-    "x-velocity": "xvel",
-    "y-velocity": "yvel",
-    "z-velocity": "zvel",
-    "Density": "density",
-    "Total_Energy": "eden",
-    "Temperature": "temperature",
-    "x-momentum": "xmom",
-    "y-momentum": "ymom",
-    "z-momentum": "zmom"
-}
-
-# Setup containers for fields possibly in the output files
-KnownCastroFields = FieldInfoContainer()
-add_castro_field = KnownCastroFields.add_field
-
-# and always derived ones
-CastroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CastroFieldInfo.add_field
-
-# Start adding fields
-add_castro_field("density", function=NullFunc, take_log=True,
-                 units=r"\rm{g}/\rm{cm}^3")
-
-# fix projected units
-KnownCastroFields["density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-
-add_castro_field("eden", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("eden")],
-                 units=r"\rm{erg}/\rm{cm}^3")
-
-add_castro_field("xmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("xmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-add_castro_field("ymom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("ymom")],
-                 units=r"\rm{gm}/\rm{cm^2\ s}")
-
-add_castro_field("zmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("zmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-# Now populate derived fields
-for mine, theirs in translation_dict.items():
-    if KnownCastroFields.has_key(theirs):
-        add_field(theirs, function=TranslationFunc(mine),
-                  take_log=KnownCastroFields[theirs].take_log)
-
-# Now fallbacks, in case these fields are not output
-def _xVelocity(field, data):
-    """ Generate x-velocity from x-momentum and density. """
-    return data["xmom"] / data["density"]
-
-add_field("x-velocity", function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _yVelocity(field, data):
-    """ Generate y-velocity from y-momentum and density. """
-    return data["ymom"] / data["density"]
-
-add_field("y-velocity", function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field, data):
-    """ Generate z-velocity from z-momentum and density. """
-    return data["zmom"] / data["density"]
-
-add_field("z-velocity", function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _ThermalEnergy(field, data):
-    """
-    Generate thermal (gas energy). Dual Energy Formalism was implemented by
-    Stella, but this isn't how it's called, so I'll leave that commented out for
-    now.
-
-    """
-    #if data.pf["DualEnergyFormalism"]:
-    #    return data["Gas_Energy"]
-    #else:
-    return data["Total_Energy"] - 0.5 * data["density"] * (
-        data["x-velocity"]**2.0
-        + data["y-velocity"]**2.0
-        + data["z-velocity"]**2.0 )
-
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
-
-def _Pressure(field, data):
-    """
-    M{(Gamma-1.0)*e, where e is thermal energy density
-    
-    NB: this will need to be modified for radiation
-
-    """
-    return (data.pf["Gamma"] - 1.0) * data["ThermalEnergy"]
-
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
-
-def _Temperature(field, data):
-    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
-            data["ThermalEnergy"] / (kboltz * data["Density"]))
-
-add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
-          take_log=False)
-
-def _convertParticleMassMsun(data):
-    return 1.0 / 1.989e33
-def _ParticleMassMsun(field, data):
-    return data["particle_mass"]
-
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_ParticleMassMsun)
-
-# Fundamental fields that are usually/always output:
-#   density
-#   xmom
-#   ymom
-#   zmom
-#   rho_E
-#   rho_e
-#   Temp
-#
-# "Derived" fields that are sometimes output:
-#   x_velocity
-#   y_velocity
-#   z_velocity
-#   magvel
-#   grav_x
-#   grav_y
-#   grav_z
-#   maggrav
-#   magvort
-#   pressure
-#   entropy
-#   divu
-#   eint_e (e as derived from the "rho e" variable)
-#   eint_E (e as derived from the "rho E" variable)

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""
-Castro data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import numpy as np
-from yt.utilities.io_handler import \
-           BaseIOHandler
-from yt.utilities.lib import \
-            read_castro_particles
-
-from definitions import \
-    yt2castroFieldsDict, \
-    castro_particle_field_names
-
-class IOHandlerNative(BaseIOHandler):
-
-    _data_style = "castro_native"
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
-    def _read_particle_field(self, grid, field):
-        offset = grid._particle_offset
-        filen = os.path.expanduser(grid.particle_filename)
-        off = grid._particle_offset
-        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
-        read_castro_particles(filen, off,
-            castro_particle_field_names.index(field),
-            len(castro_particle_field_names),
-            tr)
-        return tr
-
-    def _read_data(self, grid, field):
-        """
-        reads packed multiFABs output by BoxLib in "NATIVE" format.
-
-        """
-        if field in castro_particle_field_names:
-            return self._read_particle_field(grid, field)
-        filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen,'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            headerRe = re.compile(castro_FAB_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian
-            # check endianness (this code is ugly. fix?)
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i'% bytesPerReal) #always a floating point
-
-            # determine size of FAB
-            start = np.array(map(int, start.split(',')))
-            stop = np.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytes_per_real:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-
-        else:
-            start = grid.start_index
-            stop = grid.stop_index
-            dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytes_per_real
-
-        nElements = grid.ActiveDimensions.prod()
-
-        # one field has nElements*bytesPerReal bytes and is located
-        # nElements*bytesPerReal*field_index from the offset location
-        if yt2castroFieldsDict.has_key(field):
-            fieldname = yt2castroFieldsDict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = np.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
-
-        # we can/should also check against the max and min in the header file
-
-        inFile.close()
-        return field

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/castro/setup.py
--- a/yt/frontends/castro/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('castro', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -225,6 +225,7 @@
         self.object_types.sort()
 
     def _count_grids(self):
+        self.num_grids = None
         test_grid = test_grid_id = None
         self.num_stars = 0
         for line in rlines(open(self.hierarchy_filename, "rb")):
@@ -235,8 +236,11 @@
             if line.startswith("NumberOfStarParticles"):
                 self.num_stars = int(line.split("=")[-1])
             if line.startswith("Grid "):
-                self.num_grids = test_grid_id = int(line.split("=")[-1])
-                break
+                if self.num_grids is None:
+                    self.num_grids = int(line.split("=")[-1])
+                test_grid_id = int(line.split("=")[-1])
+                if test_grid is not None:
+                    break
         self._guess_data_style(self.pf.dimensionality, test_grid, test_grid_id)
 
     def _guess_data_style(self, rank, test_grid, test_grid_id):
@@ -293,7 +297,7 @@
         self.grids[0].Level = 0
         si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
-        pbar = get_pbar("Parsing Hierarchy", self.num_grids)
+        pbar = get_pbar("Parsing Hierarchy ", self.num_grids)
         for grid_id in xrange(self.num_grids):
             pbar.update(grid_id)
             # We will unroll this list
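
A note on the _count_grids change above: the hierarchy file is scanned in reverse (via rlines), so the first "Grid = N" line encountered carries the largest grid index, which is also the total grid count. The loop now records that count once and keeps scanning until a usable test grid also turns up, instead of breaking on the first match. A simplified standalone sketch of that counting logic (input format assumed, test-grid handling omitted):

    def count_grids(lines_reversed):
        # lines_reversed: hierarchy file lines, last line first (assumed layout).
        num_grids = test_grid_id = None
        for line in lines_reversed:
            if line.startswith("Grid "):
                if num_grids is None:
                    num_grids = int(line.split("=")[-1])  # highest id == total count
                test_grid_id = int(line.split("=")[-1])
        return num_grids, test_grid_id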

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -404,7 +404,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
+                           data["particle_mass"][filter].astype(np.float64),
                            num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 
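
The added .astype(np.float64) matters because the compiled deposit routine is written against double-precision buffers, while particle masses read from disk may arrive as float32. A one-line illustration of the guard (pure NumPy; the array contents are placeholders):

    import numpy as np
    masses = np.ones(8, dtype=np.float32)   # e.g. float32 straight from an output file
    masses64 = masses.astype(np.float64)    # now safe to hand to a float64-only routine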

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/__init__.py
--- a/yt/frontends/gadget/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.gadget
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.gadget
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      GadgetGrid, \
-      GadgetHierarchy, \
-      GadgetStaticOutput
-
-from .fields import \
-      GadgetFieldInfo, \
-      add_gadget_field
-
-from .io import \
-      IOHandlerGadget

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""
-Data structures for Gadget.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-from itertools import izip
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
-from yt.data_objects.hierarchy import \
-    AMRHierarchy
-from yt.data_objects.static_output import \
-    StaticOutput
-from yt.utilities.definitions import \
-    sec_conversion
-
-from .fields import GadgetFieldInfo, KnownGadgetFields
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-
-class GadgetGrid(AMRGridPatch):
-    _id_offset = 0
-    def __init__(self, hierarchy, id, dimensions, start,
-                 level, parent_id, particle_count):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.filename,
-                              hierarchy = hierarchy)
-        self.Parent = [] # Only one parent per grid        
-        self.Children = []
-        self.Level = level
-        self.ActiveDimensions = dimensions.copy()
-        self.NumberOfParticles = particle_count
-        self.start_index = start.copy().astype("int64")
-        self.stop_index = self.start_index + dimensions.copy()
-        self.id = id
-        self._parent_id = parent_id
-        
-        try:
-            padd = '/data/grid_%010i/particles' % id
-            self.particle_types = self.hierarchy._handle[padd].keys()
-        except:
-            self.particle_types =  ()
-        self.filename = hierarchy.filename
-        
-    def __repr__(self):
-        return 'GadgetGrid_%05i'%self.id
-        
-class GadgetHierarchy(AMRHierarchy):
-    grid = GadgetGrid
-
-    def __init__(self, pf, data_style='gadget_hdf5'):
-        self.filename = pf.filename
-        self.directory = os.path.dirname(pf.filename)
-        self.data_style = data_style
-        self._handle = h5py.File(pf.filename)
-        AMRHierarchy.__init__(self, pf, data_style)
-        self._handle.close()
-        self._handle = None
-        
-
-    def _initialize_data_storage(self):
-        pass
-
-    def _detect_fields(self):
-        #this adds all the fields in 
-        #/particle_types/{Gas/Stars/etc.}/{position_x, etc.}
-        self.field_list = []
-        for ptype in self._handle['particle_types'].keys():
-            for field in self._handle['particle_types'+'/'+ptype]:
-                if field not in self.field_list:
-                    self.field_list += field,
-        
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _count_grids(self):
-        self.num_grids = len(self._handle['/grid_dimensions'])
-        
-    def _parse_hierarchy(self):
-        f = self._handle # shortcut
-        npa = np.array
-        DLE = self.parameter_file.domain_left_edge
-        DRE = self.parameter_file.domain_right_edge
-        DW = (DRE - DLE)
-        
-        self.grid_levels.flat[:] = f['/grid_level'][:].astype("int32")
-        LI = f['/grid_left_index'][:]
-        print LI
-        self.grid_dimensions[:] = f['/grid_dimensions'][:]
-        self.grid_left_edge[:]  = (LI * DW + DLE)
-        dxs = 1.0/(2**(self.grid_levels+1)) * DW
-        self.grid_right_edge[:] = self.grid_left_edge \
-                                + dxs *(1 + self.grid_dimensions)
-        self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
-        grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = np.max(self.grid_levels)
-        
-        args = izip(xrange(self.num_grids), self.grid_levels.flat,
-                    grid_parent_id, LI,
-                    self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = np.empty(len(args), dtype='object')
-        for gi, (j,lvl,p, le, d, n) in enumerate(args):
-            self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
-        
-    def _populate_grid_objects(self):    
-        for g in self.grids:
-            if g._parent_id >= 0:
-                parent = self.grids[g._parent_id]
-                g.Parent = parent
-                parent.Children.append(g)
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-            
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-class GadgetStaticOutput(StaticOutput):
-    _hierarchy_class = GadgetHierarchy
-    _fieldinfo_fallback = GadgetFieldInfo
-    _fieldinfo_known = KnownGadgetFields
-
-    def __init__(self, filename,storage_filename=None) :
-        self.storage_filename = storage_filename
-        self.filename = filename
-        
-        StaticOutput.__init__(self, filename, 'gadget_infrastructure')
-        self._set_units()
-        
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['cm'] = 1.0
-        self.units['unitary'] = 1.0 / \
-            (self.domain_right_edge - self.domain_left_edge).max()
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-
-    def _parse_parameter_file(self):
-        fileh = h5py.File(self.filename)
-        sim_param = fileh['/simulation_parameters'].attrs
-        self.refine_by = sim_param['refine_by']
-        self.dimensionality = sim_param['dimensionality']
-        self.num_ghost_zones = sim_param['num_ghost_zones']
-        self.field_ordering = sim_param['field_ordering']
-        self.domain_dimensions = sim_param['domain_dimensions']
-        self.current_time = sim_param['current_time']
-        self.domain_left_edge = sim_param['domain_left_edge']
-        self.domain_right_edge = sim_param['domain_right_edge']
-        self.unique_identifier = sim_param['unique_identifier']
-        self.cosmological_simulation = sim_param['cosmological_simulation']
-        self.current_redshift = sim_param['current_redshift']
-        self.omega_lambda = sim_param['omega_lambda']
-        self.hubble_constant = sim_param['hubble_constant']
-        fileh.close()
-        
-         
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        format = 'Gadget Infrastructure'
-        add1 = 'griddded_data_format'
-        add2 = 'data_software'
-        try:
-            fileh = h5py.File(args[0],'r')
-            if add1 in fileh['/'].items():
-                if add2 in fileh['/'+add1].attrs.keys():
-                    if fileh['/'+add1].attrs[add2] == format:
-                        fileh.close()
-                        return True
-            fileh.close()
-        except:
-            pass
-        return False

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""
-Gadget-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-
-GadgetFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_gadget_field = GadgetFieldInfo.add_field
-
-add_field = add_gadget_field
-
-translation_dict = {"particle_position_x" : "position_x",
-                    "particle_position_y" : "position_y",
-                    "particle_position_z" : "position_z",
-                   }
-
-def _generate_translation(mine, theirs):
-    pfield = mine.startswith("particle")
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True,
-              particle_type = pfield)
-
-for f,v in translation_dict.items():
-    if v not in GadgetFieldInfo:
-        # Note here that it's the yt field that we check for particle nature
-        pfield = f.startswith("particle")
-        add_field(v, function=lambda a,b: None, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
-
-
-#for f,v in translation_dict.items():
-#    add_field(f, function=lambda a,b: None, take_log=True,
-#        validators = [ValidateDataField(v)],
-#        units=r"\rm{cm}")
-#    add_field(v, function=lambda a,b: None, take_log=True,
-#        validators = [ValidateDataField(v)],
-#        units=r"\rm{cm}")
-          
-
-          
-add_field("position_x", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_x")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("position_y", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_y")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("position_z", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_z")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("VEL", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("VEL")],
-          units=r"")
-
-add_field("id", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ID")],
-          units=r"")
-
-add_field("mass", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("mass")],
-          units=r"\rm{g}")
-def _particle_mass(field, data):
-    return data["mass"]/just_one(data["CellVolume"])
-def _convert_particle_mass(data):
-    return 1.0
-add_field("particle_mass", function=_particle_mass, take_log=True,
-          convert_function=_convert_particle_mass,
-          validators = [ValidateSpatial(0)],
-          units=r"\mathrm{g}\/\mathrm{cm}^{-3}")
-
-add_field("U", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("U")],
-          units=r"")
-
-add_field("NE", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("NE")],
-          units=r"")
-
-add_field("POT", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("POT")],
-          units=r"")
-
-add_field("ACCE", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ACCE")],
-          units=r"")
-
-add_field("ENDT", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ENDT")],
-          units=r"")
-
-add_field("TSTP", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("TSTP")],
-          units=r"")
-

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-Gadget-specific data-file handling function
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-
-class IOHandlerGadget(BaseIOHandler):
-    _data_style = 'gadget_infrastructure'
-    def _read_data(self, grid, field):
-        data = []
-        fh = h5py.File(grid.filename,mode='r')
-        for ptype in grid.particle_types:
-            address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
-            data.append(fh[address][:])
-        if len(data) > 0:
-            data = np.concatenate(data)
-        fh.close()
-        return np.array(data)
-    def _read_field_names(self,grid): 
-        adr = grid.Address
-        fh = h5py.File(grid.filename,mode='r')
-        rets = cPickle.loads(fh['/root'].attrs['fieldnames'])
-        fh.close()
-        return rets
-
-    def _read_data_slice(self,grid, field, axis, coord):
-        #how would we implement axis here?
-        dat = self._read_data(grid,field)
-        return dat[coord]
-

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/gadget/setup.py
--- a/yt/frontends/gadget/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('gadget', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 1ef19ed553b44f488aae8274c8b04b5245cb2afb yt/frontends/maestro/__init__.py
--- a/yt/frontends/maestro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.maestro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

This diff is so big that we needed to truncate the remainder.

https://bitbucket.org/yt_analysis/yt/commits/02d68ec508c0/
Changeset:   02d68ec508c0
Branch:      yt
User:        jzuhone
Date:        2013-11-13 19:31:39
Summary:     Hubble constant shouldn't be zero.
Affected #:  1 file

diff -r 6a26fa15225b7064b7ea555f4c2c0ab2cc9a5fc7 -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -250,6 +250,7 @@
             hubble = getattr(pf, "hubble_constant", None)
             omega_m = getattr(pf, "omega_matter", None)
             omega_l = getattr(pf, "omega_lambda", None)
+            if hubble == 0: hubble = None
             if hubble is not None and \
                omega_m is not None and \
                omega_l is not None:
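
The one-line guard above treats a reported Hubble constant of zero as missing, so the code falls back to its default cosmology instead of accepting H0 = 0. A minimal sketch of the same pattern, with `pf` standing in for a loaded dataset and the fallback values assumed rather than taken from this diff:

    # Sketch only; attribute names follow the diff, defaults are illustrative.
    hubble = getattr(pf, "hubble_constant", None)
    omega_m = getattr(pf, "omega_matter", None)
    omega_l = getattr(pf, "omega_lambda", None)
    if hubble == 0:
        hubble = None  # H0 = 0 is unphysical; treat it as "not provided"
    if None in (hubble, omega_m, omega_l):
        hubble, omega_m, omega_l = 0.71, 0.27, 0.73  # assumed defaults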


https://bitbucket.org/yt_analysis/yt/commits/2a282b183110/
Changeset:   2a282b183110
Branch:      yt
User:        jzuhone
Date:        2013-11-13 19:31:59
Summary:     Merging
Affected #:  35 files

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
 echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410  xray_emissivity.h5' > xray_emissivity.h5.sha512
 get_ytdata xray_emissivity.h5
 
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
 mkdir -p ${DEST_DIR}/src
 cd ${DEST_DIR}/src
 

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
         self._calculate_deltaz_min(deltaz_min=deltaz_min)
 
         cosmology_splice = []
-
+ 
+        if near_redshift == far_redshift:
+            self.simulation.get_time_series(redshifts=[near_redshift])
+            cosmology_splice.append({'time': self.simulation[0].current_time,
+                                     'redshift': self.simulation[0].current_redshift,
+                                     'filename': os.path.join(self.simulation[0].fullpath,
+                                                              self.simulation[0].basename),
+                                     'next': None})
+            mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+                       (cosmology_splice[0]['filename'], near_redshift))
+            return cosmology_splice
+        
         # Use minimum number of datasets to go from z_i to z_f.
         if minimal:
 
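With the new branch above, create_cosmology_splice short-circuits when the near and far redshifts coincide and returns a single-entry splice instead of solving for a chain of datasets. A hedged usage sketch (the constructor arguments and simulation filename are assumptions, not taken from this diff):

    # Hypothetical single-dataset splice (yt 2.x style API assumed).
    from yt.analysis_modules.cosmological_observation.cosmology_splice \
        import CosmologySplice
    cs = CosmologySplice("simulation.par", "Enzo")
    splice = cs.create_cosmology_splice(near_redshift=0.5, far_redshift=0.5)
    # splice is a one-element list:
    # [{'time': ..., 'redshift': 0.5, 'filename': ..., 'next': None}]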

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
     only_on_root, \
     parallel_objects, \
     parallel_root_only
+from yt.utilities.physical_constants import \
+     speed_of_light_cgs, \
+     cm_per_km
 
 class LightRay(CosmologySplice):
     """
@@ -51,7 +54,9 @@
     near_redshift : float
         The near (lowest) redshift for the light ray.
     far_redshift : float
-        The far (highest) redshift for the light ray.
+        The far (highest) redshift for the light ray.  NOTE: in order 
+        to use only a single dataset in a light ray, set the 
+        near_redshift and far_redshift to be the same.
     use_minimum_datasets : bool
         If True, the minimum number of datasets is used to connect the
         initial and final redshift.  If false, the light ray solution
@@ -111,65 +116,92 @@
                                        time_data=time_data,
                                        redshift_data=redshift_data)
 
-    def _calculate_light_ray_solution(self, seed=None, filename=None):
+    def _calculate_light_ray_solution(self, seed=None, 
+                                      start_position=None, end_position=None,
+                                      trajectory=None, filename=None):
         "Create list of datasets to be added together to make the light ray."
 
         # Calculate dataset sizes, and get random dataset axes and centers.
         np.random.seed(seed)
 
-        # For box coherence, keep track of effective depth travelled.
-        box_fraction_used = 0.0
+        # If using only one dataset, set start and stop manually.
+        if start_position is not None:
+            if len(self.light_ray_solution) > 1:
+                raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+            if not ((end_position is None) ^ (trajectory is None)):
+                raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+            self.light_ray_solution[0]['start'] = np.array(start_position)
+            if end_position is not None:
+                self.light_ray_solution[0]['end'] = np.array(end_position)
+            else:
+                # assume trajectory given as r, theta, phi
+                if len(trajectory) != 3:
+                    raise RuntimeError("LightRay Error: trajectory must have length 3.")
+                r, theta, phi = trajectory
+                self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+                  r * np.array([np.cos(phi) * np.sin(theta),
+                                np.sin(phi) * np.sin(theta),
+                                np.cos(theta)])
+            self.light_ray_solution[0]['traversal_box_fraction'] = \
+              vector_length(self.light_ray_solution[0]['start'], 
+                            self.light_ray_solution[0]['end'])
 
-        for q in range(len(self.light_ray_solution)):
-            if (q == len(self.light_ray_solution) - 1):
-                z_next = self.near_redshift
-            else:
-                z_next = self.light_ray_solution[q+1]['redshift']
+        # the normal way (random start positions and trajectories for each dataset)
+        else:
+            
+            # For box coherence, keep track of effective depth travelled.
+            box_fraction_used = 0.0
 
-            # Calculate fraction of box required for a depth of delta z
-            self.light_ray_solution[q]['traversal_box_fraction'] = \
-                self.cosmology.ComovingRadialDistance(\
-                z_next, self.light_ray_solution[q]['redshift']) * \
-                self.simulation.hubble_constant / \
-                self.simulation.box_size
+            for q in range(len(self.light_ray_solution)):
+                if (q == len(self.light_ray_solution) - 1):
+                    z_next = self.near_redshift
+                else:
+                    z_next = self.light_ray_solution[q+1]['redshift']
 
-            # Simple error check to make sure more than 100% of box depth
-            # is never required.
-            if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
-                            (self.light_ray_solution[q]['redshift'], z_next,
-                             self.light_ray_solution[q]['traversal_box_fraction']))
-                mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
-                            (self.light_ray_solution[q]['deltazMax'],
-                             self.light_ray_solution[q]['redshift']-z_next))
+                # Calculate fraction of box required for a depth of delta z
+                self.light_ray_solution[q]['traversal_box_fraction'] = \
+                    self.cosmology.ComovingRadialDistance(\
+                    z_next, self.light_ray_solution[q]['redshift']) * \
+                    self.simulation.hubble_constant / \
+                    self.simulation.box_size
 
-            # Get dataset axis and center.
-            # If using box coherence, only get start point and vector if
-            # enough of the box has been used,
-            # or if box_fraction_used will be greater than 1 after this slice.
-            if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
-                    (box_fraction_used >
-                     self.minimum_coherent_box_fraction) or \
-                    (box_fraction_used +
-                     self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
-                # Random start point
-                self.light_ray_solution[q]['start'] = np.random.random(3)
-                theta = np.pi * np.random.random()
-                phi = 2 * np.pi * np.random.random()
-                box_fraction_used = 0.0
-            else:
-                # Use end point of previous segment and same theta and phi.
-                self.light_ray_solution[q]['start'] = \
-                  self.light_ray_solution[q-1]['end'][:]
+                # Simple error check to make sure more than 100% of box depth
+                # is never required.
+                if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+                                (self.light_ray_solution[q]['redshift'], z_next,
+                                 self.light_ray_solution[q]['traversal_box_fraction']))
+                    mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+                                (self.light_ray_solution[q]['deltazMax'],
+                                 self.light_ray_solution[q]['redshift']-z_next))
 
-            self.light_ray_solution[q]['end'] = \
-              self.light_ray_solution[q]['start'] + \
-                self.light_ray_solution[q]['traversal_box_fraction'] * \
-                np.array([np.cos(phi) * np.sin(theta),
-                          np.sin(phi) * np.sin(theta),
-                          np.cos(theta)])
-            box_fraction_used += \
-              self.light_ray_solution[q]['traversal_box_fraction']
+                # Get dataset axis and center.
+                # If using box coherence, only get start point and vector if
+                # enough of the box has been used,
+                # or if box_fraction_used will be greater than 1 after this slice.
+                if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+                        (box_fraction_used >
+                         self.minimum_coherent_box_fraction) or \
+                        (box_fraction_used +
+                         self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+                    # Random start point
+                    self.light_ray_solution[q]['start'] = np.random.random(3)
+                    theta = np.pi * np.random.random()
+                    phi = 2 * np.pi * np.random.random()
+                    box_fraction_used = 0.0
+                else:
+                    # Use end point of previous segment and same theta and phi.
+                    self.light_ray_solution[q]['start'] = \
+                      self.light_ray_solution[q-1]['end'][:]
+
+                self.light_ray_solution[q]['end'] = \
+                  self.light_ray_solution[q]['start'] + \
+                    self.light_ray_solution[q]['traversal_box_fraction'] * \
+                    np.array([np.cos(phi) * np.sin(theta),
+                              np.sin(phi) * np.sin(theta),
+                              np.cos(theta)])
+                box_fraction_used += \
+                  self.light_ray_solution[q]['traversal_box_fraction']
 
         if filename is not None:
             self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
                             'far_redshift':self.far_redshift,
                             'near_redshift':self.near_redshift})
 
-    def make_light_ray(self, seed=None, fields=None,
+    def make_light_ray(self, seed=None,
+                       start_position=None, end_position=None,
+                       trajectory=None,
+                       fields=None,
                        solution_filename=None, data_filename=None,
                        get_los_velocity=False,
                        get_nearest_halo=False,
@@ -197,6 +232,19 @@
         seed : int
             Seed for the random number generator.
             Default: None.
+        start_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the starting position of the ray.
+            Default: None.
+        end_position : list of floats
+            Used only if creating a light ray from a single dataset.
+            The coordinates of the ending position of the ray.
+            Default: None.
+        trajectory : list of floats
+            Used only if creating a light ray from a single dataset.
+            The (r, theta, phi) direction of the light ray.  Use either
+            end_position or trajectory, not both.
+            Default: None.
         fields : list
             A list of fields for which to get data.
             Default: None.
@@ -313,7 +361,11 @@
             nearest_halo_fields = []
 
         # Calculate solution.
-        self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+        self._calculate_light_ray_solution(seed=seed, 
+                                           start_position=start_position, 
+                                           end_position=end_position,
+                                           trajectory=trajectory,
+                                           filename=solution_filename)
 
         # Initialize data structures.
         self._data = {}
@@ -335,9 +387,18 @@
         for my_storage, my_segment in parallel_objects(self.light_ray_solution,
                                                        storage=all_ray_storage,
                                                        njobs=njobs, dynamic=dynamic):
-            mylog.info("Creating ray segment at z = %f." %
-                       my_segment['redshift'])
-            if my_segment['next'] is None:
+
+            # Load dataset for segment.
+            pf = load(my_segment['filename'])
+
+            if self.near_redshift == self.far_redshift:
+                h_vel = cm_per_km * pf.units['mpc'] * \
+                  vector_length(my_segment['start'], my_segment['end']) * \
+                  self.cosmology.HubbleConstantNow * \
+                  self.cosmology.ExpansionFactor(my_segment['redshift'])
+                next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+                                         (1. - h_vel / speed_of_light_cgs)) - 1.
+            elif my_segment['next'] is None:
                 next_redshift = self.near_redshift
             else:
                 next_redshift = my_segment['next']['redshift']
@@ -346,9 +407,6 @@
                        (my_segment['redshift'], my_segment['start'],
                         my_segment['end']))
 
-            # Load dataset for segment.
-            pf = load(my_segment['filename'])
-
             # Break periodic ray into non-periodic segments.
             sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
 

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
     notebook_password = '',
     answer_testing_tolerance = '3',
     answer_testing_bitwise = 'False',
-    gold_standard_filename = 'gold010',
+    gold_standard_filename = 'gold011',
     local_standard_filename = 'local001',
     sketchfab_api_key = 'None'
     )

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -874,7 +874,8 @@
         pw.set_axes_unit(axes_unit)
         return pw
 
-    def to_frb(self, width, resolution, center=None, height=None):
+    def to_frb(self, width, resolution, center=None, height=None,
+               periodic = False):
         r"""This function returns a FixedResolutionBuffer generated from this
         object.
 
@@ -899,6 +900,9 @@
         center : array-like of floats, optional
             The center of the FRB.  If not specified, defaults to the center of
             the current object.
+        periodic : bool
+            Should the returned Fixed Resolution Buffer be periodic?  (default:
+            False).
 
         Returns
         -------
@@ -932,7 +936,8 @@
         yax = y_dict[self.axis]
         bounds = (center[xax] - width*0.5, center[xax] + width*0.5,
                   center[yax] - height*0.5, center[yax] + height*0.5)
-        frb = FixedResolutionBuffer(self, bounds, resolution)
+        frb = FixedResolutionBuffer(self, bounds, resolution,
+                                    periodic = periodic)
         return frb
 
     def interpolate_discretize(self, LE, RE, field, side, log_spacing=True):
@@ -1831,9 +1836,9 @@
         # It is probably faster, as it consolidates IO, but if we did it in
         # _project_level, then it would be more memory conservative
         if self.preload_style == 'all':
-            dependencies = self.get_dependencies(fields, ghost_zones = False)
+            dependencies = self.get_dependencies(fields)
             mylog.debug("Preloading %s grids and getting %s",
-                            len(self.source._get_grid_objs()),
+                            len([g for g in self.source._get_grid_objs()]),
                             dependencies)
             self.comm.preload([g for g in self._get_grid_objs()],
                           dependencies, self.hierarchy.io)
@@ -1950,7 +1955,7 @@
         grids_to_initialize = [g for g in self._grids if (g.Level == level)]
         zero_out = (level != self._max_level)
         if len(grids_to_initialize) == 0: return
-        pbar = get_pbar('Initializing tree % 2i / % 2i' \
+        pbar = get_pbar('Initializing tree % 2i / % 2i ' \
                           % (level, self._max_level), len(grids_to_initialize))
         start_index = np.empty(2, dtype="int64")
         dims = np.empty(2, dtype="int64")
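
The new periodic keyword on to_frb threads straight through to the FixedResolutionBuffer, so pixels that fall off one edge of the domain wrap around to the other side. A short sketch (the dataset name, axis, and width are placeholders; yt 2.x style imports assumed):

    # Hypothetical usage of the new flag; width is in code units here.
    from yt.mods import load
    pf = load("simulation_output")
    sl = pf.h.slice(2, 0.5)                    # slice normal to z at the midplane
    frb = sl.to_frb(0.25, 512, periodic=True)  # 512x512 buffer, wraps at edges
    image = frb["Density"]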

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/__init__.py
--- a/yt/frontends/castro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/api.py
--- a/yt/frontends/castro/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.castro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      CastroGrid, \
-      CastroHierarchy, \
-      CastroStaticOutput
-
-from .fields import \
-      CastroFieldInfo, \
-      add_castro_field
-
-from .io import \
-      IOHandlerNative

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ /dev/null
@@ -1,698 +0,0 @@
-"""
-Data structures for Castro.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import re
-import os
-import weakref
-import itertools
-from collections import defaultdict
-from string import strip, rstrip
-from stat import ST_CTIME
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import FieldInfoContainer, NullFunc
-from yt.data_objects.grid_patch import AMRGridPatch
-from yt.data_objects.hierarchy import AMRHierarchy
-from yt.data_objects.static_output import StaticOutput
-from yt.utilities.definitions import \
-    mpc_conversion, sec_conversion
-from yt.utilities.lib import get_box_grids_level
-
-from .definitions import \
-    castro2enzoDict, \
-    parameterDict, \
-    yt2castroFieldsDict, \
-    castro_FAB_header_pattern, \
-    castro_particle_field_names, \
-    boxlib_bool_to_int
-from .fields import \
-    CastroFieldInfo, \
-    KnownCastroFields, \
-    add_castro_field
-
-
-class CastroGrid(AMRGridPatch):
-    _id_offset = 0
-
-    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset,
-                 dimensions, start, stop, paranoia=False, **kwargs):
-        super(CastroGrid, self).__init__(index, **kwargs)
-        self.filename = filename
-        self._offset = offset
-        self._paranoid = paranoia  # TODO: Factor this behavior out in tests
-
-        ### TODO: error check this (test)
-        self.ActiveDimensions = (dimensions.copy()).astype('int32')#.transpose()
-        self.start_index = start.copy()#.transpose()
-        self.stop_index = stop.copy()#.transpose()
-        self.LeftEdge  = LeftEdge.copy()
-        self.RightEdge = RightEdge.copy()
-        self.index = index
-        self.Level = level
-
-    def get_global_startindex(self):
-        return self.start_index
-
-    def _prepare_grid(self):
-        """ Copies all the appropriate attributes from the hierarchy. """
-        # This is definitely the slowest part of generating the hierarchy.
-        # Now we give it pointers to all of its attributes
-        # Note that to keep in line with Enzo, we have broken PEP-8
-
-        h = self.hierarchy # cache it
-        #self.StartIndices = h.gridStartIndices[self.id]
-        #self.EndIndices = h.gridEndIndices[self.id]
-        h.grid_levels[self.id,0] = self.Level
-        h.grid_left_edge[self.id,:] = self.LeftEdge[:]
-        h.grid_right_edge[self.id,:] = self.RightEdge[:]
-        #self.Time = h.gridTimes[self.id,0]
-        #self.NumberOfParticles = h.gridNumberOfParticles[self.id,0]
-        self.field_indexes = h.field_indexes
-        self.Children = h.gridTree[self.id]
-        pIDs = h.gridReverseTree[self.id]
-
-        if len(pIDs) > 0:
-            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
-        else:
-            self.Parent = None
-
-    def _setup_dx(self):
-        # has already been read in and stored in hierarchy
-        dx = self.hierarchy.grid_dxs[self.index][0]
-        dy = self.hierarchy.grid_dys[self.index][0]
-        dz = self.hierarchy.grid_dzs[self.index][0]
-        self.dds = np.array([dx, dy, dz])
-        self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
-
-    def __repr__(self):
-        return "CastroGrid_%04i" % (self.id)
-
-class CastroHierarchy(AMRHierarchy):
-    grid = CastroGrid
-
-    def __init__(self, pf, data_style='castro_native'):
-        self.field_indexes = {}
-        self.parameter_file = weakref.proxy(pf)
-        header_filename = os.path.join(pf.fullplotdir, 'Header')
-        self.directory = pf.fullpath
-        self.data_style = data_style
-
-        # This also sets up the grid objects
-        self.read_global_header(header_filename,
-                                self.parameter_file.paranoid_read) 
-        self.read_particle_header()
-        self._cache_endianness(self.levels[-1].grids[-1])
-
-        super(CastroHierarchy, self).__init__(pf, data_style)
-        self._setup_data_io()
-        self._setup_field_list()
-        self._populate_hierarchy()
-
-    def read_global_header(self, filename, paranoid_read):
-        """ Read the global header file for an Castro plotfile output. """
-        counter = 0
-        header_file = open(filename, 'r')
-        self._global_header_lines = header_file.readlines()
-
-        # parse the file
-        self.castro_version = self._global_header_lines[0].rstrip()
-        self.n_fields = int(self._global_header_lines[1])
-
-        counter = self.n_fields + 2
-        self.field_list = []
-        for i, line in enumerate(self._global_header_lines[2:counter]):
-            self.field_list.append(line.rstrip())
-
-        # this is unused...eliminate it?
-        #for f in self.field_indexes:
-        #    self.field_list.append(castro2ytFieldsDict.get(f, f))
-
-        self.dimension = int(self._global_header_lines[counter])
-        if self.dimension != 3:
-            raise RunTimeError("Castro must be in 3D to use yt.")
-
-        counter += 1
-        self.Time = float(self._global_header_lines[counter])
-        counter += 1
-        self.finest_grid_level = int(self._global_header_lines[counter])
-        self.n_levels = self.finest_grid_level + 1
-        counter += 1
-
-        # quantities with _unnecessary are also stored in the inputs
-        # file and are not needed.  they are read in and stored in
-        # case in the future we want to enable a "backwards" way of
-        # taking the data out of the Header file and using it to fill
-        # in in the case of a missing inputs file
-        self.domainLeftEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.domainRightEdge_unnecessary = np.array(map(float, self._global_header_lines[counter].split()))
-        counter += 1
-        self.refinementFactor_unnecessary = self._global_header_lines[counter].split()
-        #np.array(map(int, self._global_header_lines[counter].split()))
-        counter += 1
-        self.globalIndexSpace_unnecessary = self._global_header_lines[counter]
-        #domain_re.search(self._global_header_lines[counter]).groups()
-        counter += 1
-        self.timestepsPerLevel_unnecessary = self._global_header_lines[counter]
-        counter += 1
-
-        self.dx = np.zeros((self.n_levels, 3))
-        for i, line in enumerate(self._global_header_lines[counter:counter+self.n_levels]):
-            self.dx[i] = np.array(map(float, line.split()))
-        counter += self.n_levels
-        self.geometry = int(self._global_header_lines[counter])
-        if self.geometry != 0:
-            raise RunTimeError("yt only supports cartesian coordinates.")
-        counter += 1
-
-        # this is just to debug. eventually it should go away.
-        linebreak = int(self._global_header_lines[counter])
-        if linebreak != 0:
-            raise RunTimeError("INTERNAL ERROR! Header is unexpected size")
-        counter += 1
-
-        # Each level is one group with ngrids on it. each grid has 3 lines of 2 reals
-        # BoxLib madness
-        self.levels = []
-        grid_counter = 0
-        file_finder_pattern = r"FabOnDisk: (\w+_D_[0-9]{4}) (\d+)\n"
-        re_file_finder = re.compile(file_finder_pattern)
-        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
-        re_dim_finder = re.compile(dim_finder_pattern)
-        data_files_pattern = r"Level_[\d]/"
-        data_files_finder = re.compile(data_files_pattern)
-
-        for level in range(0, self.n_levels):
-            tmp = self._global_header_lines[counter].split()
-            # Should this be grid_time or level_time??
-            lev, ngrids, grid_time = int(tmp[0]), int(tmp[1]), float(tmp[2])
-            counter += 1
-            nsteps = int(self._global_header_lines[counter])
-            counter += 1
-            self.levels.append(CastroLevel(lev, ngrids))
-            # Open level header, extract file names and offsets for each grid.
-            # Read slightly out of order here: at the end of the lo, hi pairs
-            # for x, y, z is a *list* of files types in the Level directory. 
-            # Each type has Header and a number of data files
-            # (one per processor)
-            tmp_offset = counter + 3*ngrids
-            nfiles = 0
-            key_off = 0
-            files =   {} # dict(map(lambda a: (a,[]), self.field_list))
-            offsets = {} # dict(map(lambda a: (a,[]), self.field_list))
-
-            while (nfiles + tmp_offset < len(self._global_header_lines) and
-                   data_files_finder.match(self._global_header_lines[nfiles+tmp_offset])):
-                filen = os.path.join(self.parameter_file.fullplotdir,
-                                     self._global_header_lines[nfiles+tmp_offset].strip())
-                # open each "_H" header file, and get the number of
-                # components within it
-                level_header_file = open(filen+'_H','r').read()
-                start_stop_index = re_dim_finder.findall(level_header_file) # just take the last one
-                grid_file_offset = re_file_finder.findall(level_header_file)
-                ncomp_this_file = int(level_header_file.split('\n')[2])
-
-                for i in range(ncomp_this_file):
-                    key = self.field_list[i+key_off]
-                    f, o = zip(*grid_file_offset)
-                    files[key] = f
-                    offsets[key] = o
-                    self.field_indexes[key] = i
-
-                key_off += ncomp_this_file
-                nfiles += 1
-
-            # convert dict of lists to list of dicts
-            fn = []
-            off = []
-            lead_path = os.path.join(self.parameter_file.fullplotdir,
-                                     'Level_%i' % level)
-            for i in range(ngrids):
-                fi = [os.path.join(lead_path, files[key][i]) for key in self.field_list]
-                of = [int(offsets[key][i]) for key in self.field_list]
-                fn.append(dict(zip(self.field_list, fi)))
-                off.append(dict(zip(self.field_list, of)))
-
-            for grid in range(0, ngrids):
-                gfn = fn[grid]  # filename of file containing this grid
-                gfo = off[grid] # offset within that file
-                xlo, xhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                ylo, yhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                zlo, zhi = map(float, self._global_header_lines[counter].split())
-                counter += 1
-                lo = np.array([xlo, ylo, zlo])
-                hi = np.array([xhi, yhi, zhi])
-                dims, start, stop = self._calculate_grid_dimensions(start_stop_index[grid])
-                self.levels[-1].grids.append(self.grid(lo, hi, grid_counter,
-                                                       level, gfn, gfo, dims,
-                                                       start, stop,
-                                                       paranoia=paranoid_read,  ### TODO: reconcile the paranoia/paranoid_read naming
-                                                       hierarchy=self))
-                grid_counter += 1   # this is global, and shouldn't be reset
-                                    # for each level
-
-            # already read the filenames above...
-            counter += nfiles
-            self.num_grids = grid_counter
-            self.float_type = 'float64'
-
-        self.maxLevel = self.n_levels - 1
-        self.max_level = self.n_levels - 1
-        header_file.close()
-
-    def read_particle_header(self):
-        # We need to get particle offsets and particle counts
-        if not self.parameter_file.use_particles:
-            self.pgrid_info = np.zeros((self.num_grids, 3), dtype='int64')
-            return
-
-        self.field_list += castro_particle_field_names[:]
-        header = open(os.path.join(self.parameter_file.fullplotdir, "DM",
-                                   "Header"))
-        version = header.readline()
-        ndim = header.readline()
-        nfields = header.readline()
-        ntotalpart = int(header.readline())
-        dummy = header.readline() # nextid
-        maxlevel = int(header.readline()) # max level
-
-        # Skip over how many grids are on each level; this information is redundant
-        for i in range(maxlevel+1): dummy = header.readline()
-        grid_info = np.fromiter((int(i)
-                                 for line in header.readlines()
-                                 for i in line.split()),
-                                dtype='int64',
-                                count=3*self.num_grids).reshape((self.num_grids, 3))
-        self.pgrid_info = grid_info
-
-    def _cache_endianness(self, test_grid):
-        """
-        Cache the endianness and bytes per real of the grids by using a test grid
-        and assuming that all grids have the same endianness. This is a pretty
-        safe assumption since Castro uses one file per processor, and if you're
-        running on a cluster with different endian processors, then you're on
-        your own!
-
-        """
-        # open the test file and grab the header
-        in_file = open(os.path.expanduser(test_grid.filename[self.field_list[0]]), 'rb')
-        header = in_file.readline()
-        in_file.close()
-        header.strip()
-        # Parse it. The pattern is in castro.definitions.py
-        header_re = re.compile(castro_FAB_header_pattern)
-        bytes_per_real, endian, start, stop, centerType, n_components = header_re.search(header).groups()
-        self._bytes_per_real = int(bytes_per_real)
-        if self._bytes_per_real == int(endian[0]):
-            dtype = '<'
-        elif self._bytes_per_real == int(endian[-1]):
-            dtype = '>'
-        else:
-            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-        dtype += ('f%i' % self._bytes_per_real) # always a floating point
-        self._dtype = dtype
-
-    def _calculate_grid_dimensions(self, start_stop):
-        start = np.array(map(int, start_stop[0].split(',')))
-        stop = np.array(map(int, start_stop[1].split(',')))
-        dimension = stop - start + 1
-        return dimension, start, stop
-
-    def _populate_grid_objects(self):
-        mylog.debug("Creating grid objects")
-
-        self.grids = np.concatenate([level.grids for level in self.levels])
-        basedir = self.parameter_file.fullplotdir
-
-        for g, pg in itertools.izip(self.grids, self.pgrid_info):
-            g.particle_filename = os.path.join(
-                basedir, "DM", "Level_%s" % (g.Level), "DATA_%04i" % pg[0])
-            g.NumberOfParticles = pg[1]
-            g._particle_offset = pg[2]
-
-        self.grid_particle_count[:,0] = self.pgrid_info[:,1]
-        del self.pgrid_info
-
-        gls = np.concatenate([level.ngrids * [level.level] for level in self.levels])
-        self.grid_levels[:] = gls.reshape((self.num_grids,1))
-        grid_dcs = np.concatenate([level.ngrids * [self.dx[level.level]]
-                                  for level in self.levels], axis=0)
-
-        self.grid_dxs = grid_dcs[:,0].reshape((self.num_grids,1))
-        self.grid_dys = grid_dcs[:,1].reshape((self.num_grids,1))
-        self.grid_dzs = grid_dcs[:,2].reshape((self.num_grids,1))
-
-        left_edges = []
-        right_edges = []
-        dims = []
-        for level in self.levels:
-            left_edges += [g.LeftEdge for g in level.grids]
-            right_edges += [g.RightEdge for g in level.grids]
-            dims += [g.ActiveDimensions for g in level.grids]
-
-        self.grid_left_edge = np.array(left_edges)
-        self.grid_right_edge = np.array(right_edges)
-        self.grid_dimensions = np.array(dims)
-        self.gridReverseTree = [ [] for i in range(self.num_grids)]
-        self.gridTree = [ [] for i in range(self.num_grids)]
-
-        mylog.debug("Done creating grid objects")
-
-    def _populate_hierarchy(self):
-        self._setup_grid_tree()
-        #self._setup_grid_corners()
-
-        for i, grid in enumerate(self.grids):
-            if (i % 1e4) == 0:
-                mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
-
-            grid._prepare_grid()
-            grid._setup_dx()
-
-    def _setup_grid_tree(self):
-        mask = np.empty(self.grids.size, dtype='int32')
-        for i, grid in enumerate(self.grids):
-            get_box_grids_level(grid.LeftEdge, grid.RightEdge, grid.Level + 1,
-                                self.grid_left_edge, self.grid_right_edge,
-                                self.grid_levels, mask)
-            children = self.grids[mask.astype("bool")]
-            #assert(len(children) == len(self._get_grid_children(grid)))
-            for child in children:
-                self.gridReverseTree[child.id].append(i)
-                self.gridTree[i].append(weakref.proxy(child))
-
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        dd["field_indexes"] = self.field_indexes
-        AMRHierarchy._setup_classes(self, dd)
-        #self._add_object_class('grid', "CastroGrid", CastroGridBase, dd)
-        self.object_types.sort()
-
-    def _get_grid_children(self, grid):
-        mask = np.zeros(self.num_grids, dtype='bool')
-        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
-        mask[grid_ind] = True
-        mask = np.logical_and(mask, (self.grid_levels == (grid.Level+1)).flat)
-        return self.grids[mask]
-
-    def _setup_field_list(self):
-        if self.parameter_file.use_particles:
-            # We know which particle fields will exist -- pending further
-            # changes in the future.
-            for field in castro_particle_field_names:
-                def external_wrapper(f):
-                    def _convert_function(data):
-                        return data.convert(f)
-                    return _convert_function
-                cf = external_wrapper(field)
-                # Note that we call add_castro_field on the field_info directly.  This
-                # will allow the same field detection mechanism to work for 1D, 2D
-                # and 3D fields.
-                self.pf.field_info.add_castro_field(
-                        field, lambda a, b: None,
-                        convert_function=cf, take_log=False,
-                        particle_type=True)
-
-    ### TODO: check if this can be removed completely
-    def _count_grids(self):
-        """
-        this is already provided in ???
-
-        """
-        pass
-
-    def _initialize_grid_arrays(self):
-        mylog.debug("Allocating arrays for %s grids", self.num_grids)
-        self.grid_dimensions = np.ones((self.num_grids,3), 'int32')
-        self.grid_left_edge = np.zeros((self.num_grids,3), self.float_type)
-        self.grid_right_edge = np.ones((self.num_grids,3), self.float_type)
-        self.grid_levels = np.zeros((self.num_grids,1), 'int32')
-        self.grid_particle_count = np.zeros((self.num_grids,1), 'int32')
-
-    def _parse_hierarchy(self):
-        pass
-
-    def _detect_fields(self):
-        pass
-
-    def _setup_derived_fields(self):
-        pass
-
-    def _initialize_state_variables(self):
-        """override to not re-initialize num_grids in AMRHierarchy.__init__
-
-        """
-        self._parallel_locking = False
-        self._data_file = None
-        self._data_mode = None
-        self._max_locations = {}
-
-class CastroLevel:
-    def __init__(self, level, ngrids):
-        self.level = level
-        self.ngrids = ngrids
-        self.grids = []
-
-class CastroStaticOutput(StaticOutput):
-    """
-    This is a stripped-down class that simply reads and parses *filename*,
-    without looking at the Castro hierarchy.
-
-    """
-    _hierarchy_class = CastroHierarchy
-    _fieldinfo_fallback = CastroFieldInfo
-    _fieldinfo_known = KnownCastroFields
-
-    def __init__(self, plotname, paramFilename=None, fparamFilename=None,
-                 data_style='castro_native', paranoia=False,
-                 storage_filename = None):
-        """
-        Need to override for Castro file structure.
-
-        The parameter file is usually called "inputs", and there may be a
-        Fortran inputs file, usually called "probin". *plotname* here will be
-        a directory name. As per BoxLib, data_style will be one of
-         * Native
-         * IEEE (not implemented in yt)
-         * ASCII (not implemented in yt)
-
-        """
-        self.storage_filename = storage_filename
-        self.paranoid_read = paranoia
-        self.parameter_filename = paramFilename
-        self.fparameter_filename = fparamFilename
-        self.__ipfn = paramFilename
-        self.fparameters = {}
-        super(CastroStaticOutput, self).__init__(plotname.rstrip("/"),
-                                                 data_style='castro_native')
-
-
-        # These should maybe not be hardcoded?
-        ### TODO: this.
-        self.parameters["HydroMethod"] = 'castro' # always PPM DE
-        self.parameters["Time"] = 1.0 # default unit is 1...
-        self.parameters["DualEnergyFormalism"] = 0 # always off.
-        self.parameters["EOSType"] = -1 # default
-
-        if self.fparameters.has_key("mu"):
-            self.parameters["mu"] = self.fparameters["mu"]
-
-    def _localize(self, f, default):
-        if f is None:
-            return os.path.join(self.directory, default)
-        return f
-
-    @classmethod
-    def _is_valid(cls, *args, **kwargs):
-        # fill our args
-        pname = args[0].rstrip("/")
-        dn = os.path.dirname(pname)
-        if len(args) > 1:
-            kwargs['paramFilename'] = args[1]
-
-        pfname = kwargs.get("paramFilename", os.path.join(dn, "inputs"))
-
-        # We check for the job_info file's existence because this is currently
-        # what distinguishes Castro data from MAESTRO data.
-        ### ^ that is nuts
-        pfn = os.path.join(pfname)
-        if not os.path.exists(pfn):
-            return False
-        castro = any(("castro." in line for line in open(pfn)))
-        nyx = any(("nyx." in line for line in open(pfn)))
-        castro = castro and (not nyx) # it's only castro if it's not nyx
-        maestro = os.path.exists(os.path.join(pname, "job_info"))
-        orion = (not castro) and (not maestro)
-        return castro
-
-    def _parse_parameter_file(self):
-        """
-        Parses the parameter file and establishes the various dictionaries.
-
-        """
-        # Boxlib madness
-        self.fullplotdir = os.path.abspath(self.parameter_filename)
-        self._parse_header_file()
-        self.parameter_filename = self._localize(self.__ipfn, 'inputs')
-        self.fparameter_filename = self._localize(self.fparameter_filename, 'probin')
-        if os.path.isfile(self.fparameter_filename):
-            self._parse_fparameter_file()
-            for param in self.fparameters:
-                if castro2enzoDict.has_key(param):
-                    self.parameters[castro2enzoDict[param]] = self.fparameters[param]
-
-        # Let's read the file
-        self.unique_identifier = int(os.stat(self.parameter_filename)[ST_CTIME])
-        lines = open(self.parameter_filename).readlines()
-        self.use_particles = False
-
-        for line in lines:
-            if line.find("#") >= 1: # Keep the commented lines...
-                line = line[:line.find("#")]
-            line = line.strip().rstrip()
-            if len(line) < 2 or line.find("#") == 0: # ...but skip comments
-                continue
-
-            try:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-            except ValueError:
-                mylog.error("ValueError: '%s'", line)
-
-            if castro2enzoDict.has_key(param):
-                paramName = castro2enzoDict[param]
-                t = map(parameterDict[paramName], vals.split())
-                if len(t) == 1:
-                    self.parameters[paramName] = t[0]
-                else:
-                    if paramName == "RefineBy":
-                        self.parameters[paramName] = t[0]
-                    else:
-                        self.parameters[paramName] = t
-            elif param.startswith("geometry.prob_hi"):
-                self.domain_right_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("geometry.prob_lo"):
-                self.domain_left_edge = np.array([float(i) for i in vals.split()])
-            elif param.startswith("particles.write_in_plotfile"):
-                self.use_particles = boxlib_bool_to_int(vals)
-            self.fparameters[param] = vals
-
-        self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
-        self.dimensionality = self.parameters["TopGridRank"]
-        self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
-        self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
-        self.refine_by = self.parameters.get("RefineBy", 2)
-
-        if (self.parameters.has_key("ComovingCoordinates") and
-            bool(self.parameters["ComovingCoordinates"])):
-            self.cosmological_simulation = 1
-            self.omega_lambda = self.parameters["CosmologyOmegaLambdaNow"]
-            self.omega_matter = self.parameters["CosmologyOmegaMatterNow"]
-            self.hubble_constant = self.parameters["CosmologyHubbleConstantNow"]
-
-            # Stupid that we have to read a separate file for this :/
-            a_file = open(os.path.join(self.fullplotdir, "comoving_a"))
-            line = a_file.readline().strip()
-            a_file.close()
-
-            self.parameters["CosmologyCurrentRedshift"] = 1 / float(line) - 1
-            self.cosmological_scale_factor = float(line)
-            self.current_redshift = self.parameters["CosmologyCurrentRedshift"]
-        else:
-            ### TODO: make these defaults automatic
-            self.current_redshift = self.omega_lambda = self.omega_matter = \
-                self.hubble_constant = self.cosmological_simulation = 0.0
-
-    def _parse_fparameter_file(self):
-        """
-        Parses the Fortran parameter file for Castro. Most of this will be
-        unused, but this is where mu (mass per particle, in units of m_hydrogen) is kept.
-
-        """
-        lines = open(self.fparameter_filename).readlines()
-        for line in lines:
-            if line.count("=") == 1:
-                param, vals = map(strip, map(rstrip, line.split("=")))
-                if vals.count("'") == 0 and vals.count("\"") == 0:
-                    try:
-                        t = map(float, [a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
-                    except ValueError:
-                        print "Failed on line", line
-                else:
-                    t = vals.split()
-                if len(t) == 1:
-                    self.fparameters[param] = t[0]
-                else:
-                    self.fparameters[param] = t
-
-    def _parse_header_file(self):
-        """
-        Parses the BoxLib header file to get any parameters stored there.
-        Hierarchy information is read out of this file in CastroHierarchy. 
-
-        Currently, only Time is read here.
-
-        """
-        header_file = open(os.path.join(self.fullplotdir, "Header"))
-        lines = header_file.readlines()
-        header_file.close()
-        n_fields = int(lines[1])
-        self.current_time = float(lines[3 + n_fields])
-
-    def _set_units(self):
-        """
-        Generates the conversions to various physical units based on the
-        parameter file.
-
-        """
-        self.units = {}
-        self.time_units = {}
-
-        if len(self.parameters) == 0:
-            self._parse_parameter_file()
-
-        if self.cosmological_simulation:
-            cf = 1e5 * self.cosmological_scale_factor   # Where does the 1e5 come from? Presumably km/s -> cm/s.
-            for ax in 'xyz':
-                self.units['particle_velocity_%s' % ax] = cf
-            self.units['particle_mass'] = 1.989e33  ### TODO: Make a global solar mass def
-
-        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
-        if not self.has_key("TimeUnits"):
-            mylog.warning("No time units. Setting 1.0 = 1 second.")
-            self.conversion_factors["Time"] = 1.0
-        for unit in mpc_conversion.keys():
-            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
-
-        self.conversion_factors = defaultdict(lambda: 1.0)
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['unitary'] = 1.0 / (self.domain_right_edge - self.domain_left_edge).max()
-        seconds = 1 #self["Time"]
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = seconds / sec_conversion[unit]
-        for key in yt2castroFieldsDict:
-            self.conversion_factors[key] = 1.0
-        for key in castro_particle_field_names:
-            self.conversion_factors[key] = 1.0
-
-    def _setup_nounits_units(self):
-        z = 0

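For reference, a minimal, self-contained sketch of the _calculate_grid_dimensions
step in the hierarchy code above: the same re_dim_finder regex applied to a
made-up BoxLib box specification (lo index, hi index, centering), with the
inclusive index range converted to cell counts. The sample line is hypothetical.

    import re
    import numpy as np

    dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
    re_dim_finder = re.compile(dim_finder_pattern)

    sample = "((0,0,0) (63,63,63) (0,0,0))\n"  # hypothetical box line
    start_stop = re_dim_finder.findall(sample)[0]

    start = np.array([int(i) for i in start_stop[0].split(',')])
    stop = np.array([int(i) for i in start_stop[1].split(',')])
    dims = stop - start + 1  # inclusive index range -> cell counts
    print(dims)              # -> [64 64 64]
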
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/definitions.py
--- a/yt/frontends/castro/definitions.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Various definitions for various other modules and routines
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-from yt.funcs import *
-
-def boxlib_bool_to_int(v):
-    try:
-        return int(v)
-    except ValueError:
-        pass
-    v = v.upper().strip()
-    if v[0] == 'T':
-        return 1
-    elif v[0] == 'F':
-        return 0
-
-# TODO: get rid of enzo parameters we do not need
-parameterDict = {"CosmologyCurrentRedshift": float,
-                 "CosmologyComovingBoxSize": float,
-                 "CosmologyOmegaMatterNow": float,
-                 "CosmologyOmegaLambdaNow": float,
-                 "CosmologyHubbleConstantNow": float,
-                 "CosmologyInitialRedshift": float,
-                 "DualEnergyFormalismEta1": float,
-                 "DualEnergyFormalismEta2": float,
-                 "MetaDataString": str,
-                 "HydroMethod": int,
-                 "DualEnergyFormalism": int,
-                 "InitialTime": float,
-                 "ComovingCoordinates": boxlib_bool_to_int,
-                 "DensityUnits": float,
-                 "LengthUnits": float,
-                 "LengthUnit": float,
-                 "TemperatureUnits": float,
-                 "TimeUnits": float,
-                 "GravitationalConstant": float,
-                 "Gamma": float,
-                 "MultiSpecies": int,
-                 "CompilerPrecision": str,
-                 "CurrentTimeIdentifier": int,
-                 "RefineBy": int,
-                 "BoundaryConditionName": str,
-                 "TopGridRank": int,
-                 "TopGridDimensions": int,
-                 "EOSSoundSpeed": float,
-                 "EOSType": int,
-                 "NumberOfParticleAttributes": int,
-                }
-
-# converts the Castro inputs file name to the Enzo/yt name expected
-# throughout the code. key is Castro name, value is Enzo/yt equivalent
-castro2enzoDict = {"amr.n_cell": "TopGridDimensions",
-                  "materials.gamma": "Gamma",
-                  "amr.ref_ratio": "RefineBy",
-                  "castro.use_comoving": "ComovingCoordinates",
-                  "castro.redshift_in": "CosmologyInitialRedshift",
-                  "comoving_OmL": "CosmologyOmegaLambdaNow",
-                  "comoving_OmM": "CosmologyOmegaMatterNow",
-                  "comoving_h": "CosmologyHubbleConstantNow"
-                  }
-
-yt2castroFieldsDict = {}
-castro2ytFieldsDict = {}
-
-castro_FAB_header_pattern = r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n"
-
-castro_particle_field_names = \
-    ['particle_position_%s' % ax for ax in 'xyz'] + \
-    ['particle_mass'] +  \
-    ['particle_velocity_%s' % ax for ax in 'xyz']

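The castro_FAB_header_pattern above is easiest to understand on a concrete
line. A hedged sketch, using a hypothetical FAB header string (real ones are
written by BoxLib itself), of how _cache_endianness turns the matched groups
into a numpy dtype:

    import re

    castro_FAB_header_pattern = (
        r"^FAB \(\((\d+), \([0-9 ]+\)\),\(\d+, \(([0-9 ]+)\)\)\)"
        r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")

    header = ("FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (8 7 6 5 4 3 2 1)))"
              "((0,0,0) (63,63,63) (0,0,0)) 27\n")

    groups = re.search(castro_FAB_header_pattern, header).groups()
    bytes_per_real, endian, start, stop, center, n_components = groups

    # Same branch structure as _cache_endianness: the byte-order string
    # decides between little ('<') and big ('>') endian.
    nbytes = int(bytes_per_real)
    if nbytes == int(endian[0]):
        dtype = '<'
    elif nbytes == int(endian[-1]):
        dtype = '>'
    else:
        raise ValueError("FAB header is neither big nor little endian.")
    dtype += 'f%i' % nbytes
    print(dtype)  # -> '<f8' for this sample
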
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/fields.py
--- a/yt/frontends/castro/fields.py
+++ /dev/null
@@ -1,169 +0,0 @@
-"""
-Castro-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    NullFunc, \
-    TranslationFunc, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-from yt.utilities.physical_constants import mh, kboltz
-
-translation_dict = {
-    "x-velocity": "xvel",
-    "y-velocity": "yvel",
-    "z-velocity": "zvel",
-    "Density": "density",
-    "Total_Energy": "eden",
-    "Temperature": "temperature",
-    "x-momentum": "xmom",
-    "y-momentum": "ymom",
-    "z-momentum": "zmom"
-}
-
-# Setup containers for fields possibly in the output files
-KnownCastroFields = FieldInfoContainer()
-add_castro_field = KnownCastroFields.add_field
-
-# and always derived ones
-CastroFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_field = CastroFieldInfo.add_field
-
-# Start adding fields
-add_castro_field("density", function=NullFunc, take_log=True,
-                 units=r"\rm{g}/\rm{cm}^3")
-
-# fix projected units
-KnownCastroFields["density"]._projected_units = r"\rm{g}/\rm{cm}^2"
-
-add_castro_field("eden", function=NullFunc, take_log=True,
-                 validators = [ValidateDataField("eden")],
-                 units=r"\rm{erg}/\rm{cm}^3")
-
-add_castro_field("xmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("xmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-add_castro_field("ymom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("ymom")],
-                 units=r"\rm{gm}/\rm{cm^2\ s}")
-
-add_castro_field("zmom", function=NullFunc, take_log=False,
-                 validators = [ValidateDataField("zmom")],
-                 units=r"\rm{g}/\rm{cm^2\ s}")
-
-# Now populate derived fields
-for mine, theirs in translation_dict.items():
-    if KnownCastroFields.has_key(theirs):
-        add_field(theirs, function=TranslationFunc(mine),
-                  take_log=KnownCastroFields[theirs].take_log)
-
-# Now fallbacks, in case these fields are not output
-def _xVelocity(field, data):
-    """ Generate x-velocity from x-momentum and density. """
-    return data["xmom"] / data["density"]
-
-add_field("x-velocity", function=_xVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _yVelocity(field, data):
-    """ Generate y-velocity from y-momentum and density. """
-    return data["ymom"] / data["density"]
-
-add_field("y-velocity", function=_yVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _zVelocity(field, data):
-    """ Generate z-velocity from z-momentum and density. """
-    return data["zmom"] / data["density"]
-
-add_field("z-velocity", function=_zVelocity, take_log=False,
-          units=r'\rm{cm}/\rm{s}')
-
-def _ThermalEnergy(field, data):
-    """
-    Generate thermal energy (gas energy). The Dual Energy Formalism was
-    implemented by Stella, but this isn't how it is invoked here, so that
-    path is left commented out for now.
-
-    """
-    #if data.pf["DualEnergyFormalism"]:
-    #    return data["Gas_Energy"]
-    #else:
-    return data["Total_Energy"] - 0.5 * data["density"] * (
-        data["x-velocity"]**2.0
-        + data["y-velocity"]**2.0
-        + data["z-velocity"]**2.0 )
-
-add_field("ThermalEnergy", function=_ThermalEnergy,
-          units=r"\rm{ergs}/\rm{cm^3}")
-
-def _Pressure(field, data):
-    """
-    (Gamma - 1.0) * e, where e is the thermal energy density.
-    
-    NB: this will need to be modified for radiation
-
-    """
-    return (data.pf["Gamma"] - 1.0) * data["ThermalEnergy"]
-
-add_field("Pressure", function=_Pressure, units=r"\rm{dyne}/\rm{cm}^{2}")
-
-def _Temperature(field, data):
-    return ((data.pf["Gamma"] - 1.0) * data.pf["mu"] * mh *
-            data["ThermalEnergy"] / (kboltz * data["Density"]))
-
-add_field("Temperature", function=_Temperature, units=r"\rm{Kelvin}",
-          take_log=False)
-
-def _convertParticleMassMsun(data):
-    return 1.0 / 1.989e33
-def _ParticleMassMsun(field, data):
-    return data["particle_mass"]
-
-add_field("ParticleMassMsun",
-          function=_ParticleMassMsun, validators=[ValidateSpatial(0)],
-          particle_type=True, convert_function=_convertParticleMassMsun,
-          particle_convert_function=_ParticleMassMsun)
-
-# Fundamental fields that are usually/always output:
-#   density
-#   xmom
-#   ymom
-#   zmom
-#   rho_E
-#   rho_e
-#   Temp
-#
-# "Derived" fields that are sometimes output:
-#   x_velocity
-#   y_velocity
-#   z_velocity
-#   magvel
-#   grav_x
-#   grav_y
-#   grav_z
-#   maggrav
-#   magvort
-#   pressure
-#   entropy
-#   divu
-#   eint_e (e as derived from the "rho e" variable)
-#   eint_E (e as derived from the "rho E" variable)

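The fallback fields above chain into one another: velocities come from
momenta, ThermalEnergy subtracts the kinetic term, and Pressure and
Temperature are simple functions of ThermalEnergy. A sketch of the same
arithmetic on plain numpy arrays with made-up cgs values (in yt the
expressions run on per-grid data objects instead):

    import numpy as np

    gamma, mu = 5.0 / 3.0, 0.6          # assumed values
    mh, kboltz = 1.67e-24, 1.38e-16     # approximate cgs constants

    density = np.array([1.0e-24, 2.0e-24])             # g/cm^3
    xmom = ymom = zmom = np.array([1.0e-18, 2.0e-18])  # g/(cm^2 s)
    total_energy = np.array([1.0e-10, 2.0e-10])        # erg/cm^3

    vx, vy, vz = xmom / density, ymom / density, zmom / density
    thermal = total_energy - 0.5 * density * (vx**2 + vy**2 + vz**2)
    pressure = (gamma - 1.0) * thermal
    temperature = (gamma - 1.0) * mu * mh * thermal / (kboltz * density)
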
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/io.py
--- a/yt/frontends/castro/io.py
+++ /dev/null
@@ -1,124 +0,0 @@
-"""
-Castro data-file handling functions
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import os
-import re
-import numpy as np
-from yt.funcs import mylog
-from yt.utilities.io_handler import \
-           BaseIOHandler
-from yt.utilities.lib import \
-            read_castro_particles
-
-from definitions import \
-    yt2castroFieldsDict, \
-    castro_FAB_header_pattern, \
-    castro_particle_field_names
-
-class IOHandlerNative(BaseIOHandler):
-
-    _data_style = "castro_native"
-
-    def modify(self, field):
-        return field.swapaxes(0,2)
-
-    def _read_particle_field(self, grid, field):
-        offset = grid._particle_offset
-        filen = os.path.expanduser(grid.particle_filename)
-        off = grid._particle_offset
-        tr = np.zeros(grid.NumberOfParticles, dtype='float64')
-        read_castro_particles(filen, off,
-            castro_particle_field_names.index(field),
-            len(castro_particle_field_names),
-            tr)
-        return tr
-
-    def _read_data(self, grid, field):
-        """
-        reads packed multiFABs output by BoxLib in "NATIVE" format.
-
-        """
-        if field in castro_particle_field_names:
-            return self._read_particle_field(grid, field)
-        filen = os.path.expanduser(grid.filename[field])
-        off = grid._offset[field]
-        inFile = open(filen,'rb')
-        inFile.seek(off)
-        header = inFile.readline()
-        header.strip()
-
-        if grid._paranoid:
-            mylog.warn("Castro Native reader: Paranoid read mode.")
-            headerRe = re.compile(castro_FAB_header_pattern)
-            bytesPerReal, endian, start, stop, centerType, nComponents = headerRe.search(header).groups()
-
-            # we will build up a dtype string, starting with endian
-            # check endianness (this code is ugly. fix?)
-            bytesPerReal = int(bytesPerReal)
-            if bytesPerReal == int(endian[0]):
-                dtype = '<'
-            elif bytesPerReal == int(endian[-1]):
-                dtype = '>'
-            else:
-                raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
-
-            dtype += ('f%i'% bytesPerReal) #always a floating point
-
-            # determine size of FAB
-            start = np.array(map(int, start.split(',')))
-            stop = np.array(map(int, stop.split(',')))
-
-            gridSize = stop - start + 1
-
-            error_count = 0
-            if (start != grid.start).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid start." %grid.filename
-                error_count += 1
-            if (stop != grid.stop).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid stop." %grid.filename
-                error_count += 1
-            if (gridSize != grid.ActiveDimensions).any():
-                print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %grid.filename
-                error_count += 1
-            if bytesPerReal != grid.hierarchy._bytes_per_real:
-                print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %grid.filename
-                error_count += 1
-            if (bytesPerReal == grid.hierarchy._bytes_per_real and dtype != grid.hierarchy._dtype):
-                print "Paranoia Error: Cell_H and %s do not agree on endianness." %grid.filename
-                error_count += 1
-
-            if error_count > 0:
-                raise RunTimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, grid.filename))
-
-        else:
-            start = grid.start_index
-            stop = grid.stop_index
-            dtype = grid.hierarchy._dtype
-            bytesPerReal = grid.hierarchy._bytes_per_real
-
-        nElements = grid.ActiveDimensions.prod()
-
-        # one field has nElements*bytesPerReal bytes and is located
-        # nElements*bytesPerReal*field_index from the offset location
-        if yt2castroFieldsDict.has_key(field):
-            fieldname = yt2castroFieldsDict[field]
-        else:
-            fieldname = field
-        field_index = grid.field_indexes[fieldname]
-        inFile.seek(int(nElements*bytesPerReal*field_index),1)
-        field = np.fromfile(inFile, count=nElements, dtype=dtype)
-        field = field.reshape(grid.ActiveDimensions[::-1]).swapaxes(0,2)
-
-        # we can/should also check against the max and min in the header file
-
-        inFile.close()
-        return field

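The non-paranoid branch of IOHandlerNative._read_data reduces to a short,
standalone read routine. A sketch under the assumption that the caller
already knows the grid's file, byte offset, dtype, and dimensions (all
arguments below are hypothetical):

    import numpy as np

    def read_fab_field(filename, grid_offset, field_index, dims,
                       dtype='<f8', bytes_per_real=8):
        """Read one component of one FAB from a BoxLib "NATIVE" file."""
        n_elements = int(np.prod(dims))
        with open(filename, 'rb') as f:
            f.seek(grid_offset)
            f.readline()  # skip the one-line FAB header
            # Components are stored back to back; jump over earlier fields.
            f.seek(n_elements * bytes_per_real * field_index, 1)
            data = np.fromfile(f, count=n_elements, dtype=dtype)
        # BoxLib writes Fortran (x-fastest) order; swap back to C order.
        return data.reshape(dims[::-1]).swapaxes(0, 2)
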
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/castro/setup.py
--- a/yt/frontends/castro/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('castro', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -225,6 +225,7 @@
         self.object_types.sort()
 
     def _count_grids(self):
+        self.num_grids = None
         test_grid = test_grid_id = None
         self.num_stars = 0
         for line in rlines(open(self.hierarchy_filename, "rb")):
@@ -235,8 +236,11 @@
             if line.startswith("NumberOfStarParticles"):
                 self.num_stars = int(line.split("=")[-1])
             if line.startswith("Grid "):
-                self.num_grids = test_grid_id = int(line.split("=")[-1])
-                break
+                if self.num_grids is None:
+                    self.num_grids = int(line.split("=")[-1])
+                test_grid_id = int(line.split("=")[-1])
+                if test_grid is not None:
+                    break
         self._guess_data_style(self.pf.dimensionality, test_grid, test_grid_id)
 
     def _guess_data_style(self, rank, test_grid, test_grid_id):
@@ -293,7 +297,7 @@
         self.grids[0].Level = 0
         si, ei, LE, RE, fn, npart = [], [], [], [], [], []
         all = [si, ei, LE, RE, fn]
-        pbar = get_pbar("Parsing Hierarchy", self.num_grids)
+        pbar = get_pbar("Parsing Hierarchy ", self.num_grids)
         for grid_id in xrange(self.num_grids):
             pbar.update(grid_id)
             # We will unroll this list

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -404,7 +404,7 @@
     amr_utils.CICDeposit_3(data["particle_position_x"][filter].astype(np.float64),
                            data["particle_position_y"][filter].astype(np.float64),
                            data["particle_position_z"][filter].astype(np.float64),
-                           data["particle_mass"][filter],
+                           data["particle_mass"][filter].astype(np.float64),
                            num,
                            blank, np.array(data.LeftEdge).astype(np.float64),
                            np.array(data.ActiveDimensions).astype(np.int32), 

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/__init__.py
--- a/yt/frontends/gadget/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.gadget
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/api.py
--- a/yt/frontends/gadget/api.py
+++ /dev/null
@@ -1,26 +0,0 @@
-"""
-API for yt.frontends.gadget
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-from .data_structures import \
-      GadgetGrid, \
-      GadgetHierarchy, \
-      GadgetStaticOutput
-
-from .fields import \
-      GadgetFieldInfo, \
-      add_gadget_field
-
-from .io import \
-      IOHandlerGadget

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/data_structures.py
--- a/yt/frontends/gadget/data_structures.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""
-Data structures for Gadget.
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import h5py
-import numpy as np
-from itertools import izip
-
-from yt.funcs import *
-from yt.data_objects.grid_patch import \
-    AMRGridPatch
-from yt.data_objects.hierarchy import \
-    AMRHierarchy
-from yt.data_objects.static_output import \
-    StaticOutput
-from yt.utilities.definitions import \
-    sec_conversion
-
-from .fields import GadgetFieldInfo, KnownGadgetFields
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, NullFunc
-
-class GadgetGrid(AMRGridPatch):
-    _id_offset = 0
-    def __init__(self, hierarchy, id, dimensions, start,
-                 level, parent_id, particle_count):
-        AMRGridPatch.__init__(self, id, filename = hierarchy.filename,
-                              hierarchy = hierarchy)
-        self.Parent = [] # Only one parent per grid        
-        self.Children = []
-        self.Level = level
-        self.ActiveDimensions = dimensions.copy()
-        self.NumberOfParticles = particle_count
-        self.start_index = start.copy().astype("int64")
-        self.stop_index = self.start_index + dimensions.copy()
-        self.id = id
-        self._parent_id = parent_id
-        
-        try:
-            padd = '/data/grid_%010i/particles' % id
-            self.particle_types = self.hierarchy._handle[padd].keys()
-        except:
-            self.particle_types =  ()
-        self.filename = hierarchy.filename
-        
-    def __repr__(self):
-        return 'GadgetGrid_%05i'%self.id
-        
-class GadgetHierarchy(AMRHierarchy):
-    grid = GadgetGrid
-
-    def __init__(self, pf, data_style='gadget_hdf5'):
-        self.filename = pf.filename
-        self.directory = os.path.dirname(pf.filename)
-        self.data_style = data_style
-        self._handle = h5py.File(pf.filename)
-        AMRHierarchy.__init__(self, pf, data_style)
-        self._handle.close()
-        self._handle = None
-        
-
-    def _initialize_data_storage(self):
-        pass
-
-    def _detect_fields(self):
-        #this adds all the fields in 
-        #/particle_types/{Gas/Stars/etc.}/{position_x, etc.}
-        self.field_list = []
-        for ptype in self._handle['particle_types'].keys():
-            for field in self._handle['particle_types'+'/'+ptype]:
-                if field not in self.field_list:
-                    self.field_list += field,
-        
-    def _setup_classes(self):
-        dd = self._get_data_reader_dict()
-        AMRHierarchy._setup_classes(self, dd)
-        self.object_types.sort()
-
-    def _count_grids(self):
-        self.num_grids = len(self._handle['/grid_dimensions'])
-        
-    def _parse_hierarchy(self):
-        f = self._handle # shortcut
-        npa = np.array
-        DLE = self.parameter_file.domain_left_edge
-        DRE = self.parameter_file.domain_right_edge
-        DW = (DRE - DLE)
-        
-        self.grid_levels.flat[:] = f['/grid_level'][:].astype("int32")
-        LI = f['/grid_left_index'][:]
-        print LI
-        self.grid_dimensions[:] = f['/grid_dimensions'][:]
-        self.grid_left_edge[:]  = (LI * DW + DLE)
-        dxs = 1.0/(2**(self.grid_levels+1)) * DW
-        self.grid_right_edge[:] = self.grid_left_edge \
-                                + dxs *(1 + self.grid_dimensions)
-        self.grid_particle_count.flat[:] = f['/grid_particle_count'][:].astype("int32")
-        grid_parent_id = f['/grid_parent_id'][:]
-        self.max_level = np.max(self.grid_levels)
-        
-        args = izip(xrange(self.num_grids), self.grid_levels.flat,
-                    grid_parent_id, LI,
-                    self.grid_dimensions, self.grid_particle_count.flat)
-        self.grids = np.empty(len(args), dtype='object')
-        for gi, (j,lvl,p, le, d, n) in enumerate(args):
-            self.grids[gi] = self.grid(self,j,d,le,lvl,p,n)
-        
-    def _populate_grid_objects(self):    
-        for g in self.grids:
-            if g._parent_id >= 0:
-                parent = self.grids[g._parent_id]
-                g.Parent = parent
-                parent.Children.append(g)
-        for g in self.grids:
-            g._prepare_grid()
-            g._setup_dx()
-            
-    def _setup_derived_fields(self):
-        self.derived_field_list = []
-
-class GadgetStaticOutput(StaticOutput):
-    _hierarchy_class = GadgetHierarchy
-    _fieldinfo_fallback = GadgetFieldInfo
-    _fieldinfo_known = KnownGadgetFields
-
-    def __init__(self, filename,storage_filename=None) :
-        self.storage_filename = storage_filename
-        self.filename = filename
-        
-        StaticOutput.__init__(self, filename, 'gadget_infrastructure')
-        self._set_units()
-        
-    def _set_units(self):
-        self.units = {}
-        self.time_units = {}
-        self.time_units['1'] = 1
-        self.units['1'] = 1.0
-        self.units['cm'] = 1.0
-        self.units['unitary'] = 1.0 / \
-            (self.domain_right_edge - self.domain_left_edge).max()
-        for unit in sec_conversion.keys():
-            self.time_units[unit] = 1.0 / sec_conversion[unit]
-
-    def _parse_parameter_file(self):
-        fileh = h5py.File(self.filename)
-        sim_param = fileh['/simulation_parameters'].attrs
-        self.refine_by = sim_param['refine_by']
-        self.dimensionality = sim_param['dimensionality']
-        self.num_ghost_zones = sim_param['num_ghost_zones']
-        self.field_ordering = sim_param['field_ordering']
-        self.domain_dimensions = sim_param['domain_dimensions']
-        self.current_time = sim_param['current_time']
-        self.domain_left_edge = sim_param['domain_left_edge']
-        self.domain_right_edge = sim_param['domain_right_edge']
-        self.unique_identifier = sim_param['unique_identifier']
-        self.cosmological_simulation = sim_param['cosmological_simulation']
-        self.current_redshift = sim_param['current_redshift']
-        self.omega_lambda = sim_param['omega_lambda']
-        self.hubble_constant = sim_param['hubble_constant']
-        fileh.close()
-        
-         
-    @classmethod
-    def _is_valid(self, *args, **kwargs):
-        format = 'Gadget Infrastructure'
-        add1 = 'griddded_data_format'
-        add2 = 'data_software'
-        try:
-            fileh = h5py.File(args[0],'r')
-            if add1 in fileh['/'].keys():
-                if add2 in fileh['/'+add1].attrs.keys():
-                    if fileh['/'+add1].attrs[add2] == format:
-                        fileh.close()
-                        return True
-            fileh.close()
-        except:
-            pass
-        return False

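The parent/child wiring in GadgetHierarchy._populate_grid_objects is worth
seeing in isolation. A toy sketch with stand-in grid objects, where a parent
id of -1 marks a root grid (the same convention as the code above):

    class ToyGrid(object):
        def __init__(self, gid, parent_id):
            self.id, self._parent_id = gid, parent_id
            self.Parent, self.Children = None, []

    grids = [ToyGrid(0, -1), ToyGrid(1, 0), ToyGrid(2, 0)]
    for g in grids:
        if g._parent_id >= 0:
            parent = grids[g._parent_id]  # ids double as list indexes here
            g.Parent = parent
            parent.Children.append(g)

    print([g.id for g in grids[0].Children])  # -> [1, 2]
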
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/fields.py
--- a/yt/frontends/gadget/fields.py
+++ /dev/null
@@ -1,123 +0,0 @@
-"""
-Gadget-specific fields
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import \
-    FieldInfoContainer, \
-    FieldInfo, \
-    ValidateParameter, \
-    ValidateDataField, \
-    ValidateProperty, \
-    ValidateSpatial, \
-    ValidateGridType
-import yt.data_objects.universal_fields
-
-GadgetFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
-add_gadget_field = GadgetFieldInfo.add_field
-
-add_field = add_gadget_field
-
-translation_dict = {"particle_position_x" : "position_x",
-                    "particle_position_y" : "position_y",
-                    "particle_position_z" : "position_z",
-                   }
-
-def _generate_translation(mine, theirs):
-    pfield = mine.startswith("particle")
-    add_field(theirs, function=lambda a, b: b[mine], take_log=True,
-              particle_type = pfield)
-
-for f,v in translation_dict.items():
-    if v not in GadgetFieldInfo:
-        # Note here that it's the yt field that we check for particle nature
-        pfield = f.startswith("particle")
-        add_field(v, function=lambda a,b: None, take_log=False,
-                  validators = [ValidateDataField(v)],
-                  particle_type = pfield)
-    print "Setting up translator from %s to %s" % (v, f)
-    _generate_translation(v, f)
-
-
-#for f,v in translation_dict.items():
-#    add_field(f, function=lambda a,b: None, take_log=True,
-#        validators = [ValidateDataField(v)],
-#        units=r"\rm{cm}")
-#    add_field(v, function=lambda a,b: None, take_log=True,
-#        validators = [ValidateDataField(v)],
-#        units=r"\rm{cm}")
-          
-
-          
-add_field("position_x", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_x")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("position_y", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_y")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("position_z", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("position_z")],
-          particle_type = True,
-          units=r"\rm{cm}")
-
-add_field("VEL", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("VEL")],
-          units=r"")
-
-add_field("id", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ID")],
-          units=r"")
-
-add_field("mass", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("mass")],
-          units=r"\rm{g}")
-def _particle_mass(field, data):
-    return data["mass"]/just_one(data["CellVolume"])
-def _convert_particle_mass(data):
-    return 1.0
-add_field("particle_mass", function=_particle_mass, take_log=True,
-          convert_function=_convert_particle_mass,
-          validators = [ValidateSpatial(0)],
-          units=r"\mathrm{g}\/\mathrm{cm}^{-3}")
-
-add_field("U", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("U")],
-          units=r"")
-
-add_field("NE", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("NE")],
-          units=r"")
-
-add_field("POT", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("POT")],
-          units=r"")
-
-add_field("ACCE", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ACCE")],
-          units=r"")
-
-add_field("ENDT", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("ENDT")],
-          units=r"")
-
-add_field("TSTP", function=lambda a,b: None, take_log=True,
-          validators = [ValidateDataField("TSTP")],
-          units=r"")
-

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/io.py
--- a/yt/frontends/gadget/io.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-Gadget-specific data-file handling function
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import cPickle
-
-import h5py
-import numpy as np
-
-from yt.utilities.io_handler import \
-    BaseIOHandler
-
-class IOHandlerGadget(BaseIOHandler):
-    _data_style = 'gadget_infrastructure'
-    def _read_data(self, grid, field):
-        data = []
-        fh = h5py.File(grid.filename,mode='r')
-        for ptype in grid.particle_types:
-            address = '/data/grid_%010i/particles/%s/%s' % (grid.id, ptype, field)
-            data.append(fh[address][:])
-        if len(data) > 0:
-            data = np.concatenate(data)
-        fh.close()
-        return np.array(data)
-    def _read_field_names(self,grid): 
-        fh = h5py.File(grid.filename,mode='r')
-        rets = cPickle.loads(fh['/root'].attrs['fieldnames'])
-        fh.close()
-        return rets
-
-    def _read_data_slice(self,grid, field, axis, coord):
-        #how would we implement axis here?
-        dat = self._read_data(grid,field)
-        return dat[coord]
-

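IOHandlerGadget._read_data above follows a concatenate-across-particle-types
pattern. A sketch of the same read as a standalone function, assuming the
'/data/grid_%010i/particles/<ptype>/<field>' layout used by this (removed)
frontend; the filename and arguments here are hypothetical:

    import h5py
    import numpy as np

    def read_particle_field(filename, grid_id, particle_types, field):
        """Concatenate one field across all particle types of one grid."""
        data = []
        with h5py.File(filename, mode='r') as fh:
            for ptype in particle_types:
                address = '/data/grid_%010i/particles/%s/%s' % (
                    grid_id, ptype, field)
                data.append(fh[address][:])
        if len(data) > 0:
            data = np.concatenate(data)
        return np.array(data)
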
diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/gadget/setup.py
--- a/yt/frontends/gadget/setup.py
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
-
-
-def configuration(parent_package='', top_path=None):
-    from numpy.distutils.misc_util import Configuration
-    config = Configuration('gadget', parent_package, top_path)
-    config.make_config_py()  # installs __config__.py
-    #config.make_svn_version_py()
-    return config

diff -r 02d68ec508c0bb55f0ee38aabed5bf05bb349728 -r 2a282b183110fb8cc7c88a3bef05d52a3b4e6a97 yt/frontends/maestro/__init__.py
--- a/yt/frontends/maestro/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-API for yt.frontends.maestro
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------

This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/yt_analysis/yt/
