[Yt-svn] yt-commit r968 - in trunk/yt: . lagos

joishi at wrangler.dreamhost.com
Wed Dec 3 08:14:26 PST 2008


Author: joishi
Date: Wed Dec  3 08:14:26 2008
New Revision: 968
URL: http://yt.spacepope.org/changeset/968

Log:
* Merged Orion/BoxLib support from the yt-generalization branch. Seems to mostly work...
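
A minimal usage sketch of what this merge enables (the plotfile path below is
hypothetical; the "inputs" parameter file is expected in the directory enclosing
the pltNNNN directory, and the lazy pf.hierarchy attribute is assumed to behave
as it does for Enzo outputs):

    from yt.mods import OrionStaticOutput
    pf = OrionStaticOutput("/path/to/run/plt0010")    # parses "inputs" (and "probin", if present)
    print pf["DomainLeftEdge"], pf["DomainRightEdge"] # from geometry.prob_lo / geometry.prob_hi
    h = pf.hierarchy                                  # builds the OrionHierarchy and its grids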


Added:
   trunk/yt/lagos/OrionFields.py
      - copied unchanged from r967, /branches/yt-generalization/yt/lagos/OrionFields.py
Modified:
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/DataReadingFuncs.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/__init__.py
   trunk/yt/mods.py

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Wed Dec  3 08:14:26 2008
@@ -458,3 +458,46 @@
             self.filename = os.path.join(self.hierarchy.directory, filename)
         return
 
+class OrionGridBase(AMRGridPatch):
+    _id_offset = 0
+    def __init__(self, LeftEdge, RightEdge, index, level, filename, offset, dimensions, start, stop, paranoia=False):
+        AMRGridPatch.__init__(self, index)
+        self._file_access_pooling = False
+        self.filename = filename
+        self._offset = offset
+        self._paranoid = paranoia
+        
+        # should error check this
+        self.ActiveDimensions = dimensions.copy()#.transpose()
+        self.start = start.copy()#.transpose()
+        self.stop = stop.copy()#.transpose()
+        self.LeftEdge  = LeftEdge.copy()
+        self.RightEdge = RightEdge.copy()
+        self.index = index
+        self.Level = level
+
+    def get_global_startindex(self):
+        return self.start + na.rint(self.pf["DomainLeftEdge"]/self.dx)
+
+    def _prepare_grid(self):
+        """
+        Copies all the appropriate attributes from the hierarchy
+        """
+        # This is definitely the slowest part of generating the hierarchy
+        # Now we give it pointers to all of its attributes
+        # Note that to keep in line with Enzo, we have broken PEP-8
+        h = self.hierarchy # cache it
+        self.StartIndices = h.gridStartIndices[self.id]
+        self.EndIndices = h.gridEndIndices[self.id]
+        h.gridLevels[self.id,0] = self.Level
+        h.gridLeftEdge[self.id,:] = self.LeftEdge[:]
+        h.gridRightEdge[self.id,:] = self.RightEdge[:]
+        self.Time = h.gridTimes[self.id,0]
+        self.NumberOfParticles = h.gridNumberOfParticles[self.id,0]
+        self.Children = h.gridTree[self.id]
+        pIDs = h.gridReverseTree[self.id]
+        if len(pIDs) > 0:
+            self.Parent = [weakref.proxy(h.grids[pID]) for pID in pIDs]
+        else:
+            self.Parent = []
+

Modified: trunk/yt/lagos/DataReadingFuncs.py
==============================================================================
--- trunk/yt/lagos/DataReadingFuncs.py	(original)
+++ trunk/yt/lagos/DataReadingFuncs.py	Wed Dec  3 08:14:26 2008
@@ -285,6 +285,13 @@
     def preload(self, grids, sets):
         pass
 
+class DataQueueNative(BaseDataQueue):
+    def _read_set(self, grid, field):
+        return readDataNative(grid, field)
+
+    def modify(self, field):
+        return field.swapaxes(0,2)
+
 class DataQueuePacked2D(BaseDataQueue):
     def _read_set(self, grid, field):
         return HDF5LightReader.ReadData(grid.filename,
@@ -300,3 +307,94 @@
 
     def modify(self, field):
         pass
+
+#
+# BoxLib/Orion data readers follow
+#
+def readDataNative(self, field):
+    """
+    Reads a single field from the packed multiFAB data written by BoxLib in
+    "NATIVE" format.
+    """
+    inFile = open(os.path.expanduser(self.filename),'rb')
+    inFile.seek(self._offset)
+    header = inFile.readline()
+    header = header.strip()
+
+    if self._paranoid:
+        mylog.warn("Orion Native reader: Paranoid read mode.")
+        headerRe = re.compile(orion_FAB_header_pattern)
+        bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
+
+        # we will build up a dtype string, starting with endian
+        # check endianness (this code is ugly. fix?)
+        bytesPerReal = int(bytesPerReal)
+        if bytesPerReal == int(endian[0]):
+            dtype = '<'
+        elif bytesPerReal == int(endian[-1]):
+            dtype = '>'
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        dtype += ('f%i'% bytesPerReal) #always a floating point
+
+        # determine size of FAB
+        start = na.array(map(int,start.split(',')))
+        stop = na.array(map(int,stop.split(',')))
+
+        gridSize = stop - start + 1
+
+        error_count = 0
+        if (start != self.start).any():
+            print "Paranoia Error: Cell_H and %s do not agree on grid start." %self.filename
+            error_count += 1
+        if (stop != self.stop).any():
+            print "Paranoia Error: Cell_H and %s do not agree on grid stop." %self.filename
+            error_count += 1
+        if (gridSize != self.ActiveDimensions).any():
+            print "Paranoia Error: Cell_H and %s do not agree on grid dimensions." %self.filename
+            error_count += 1
+        if bytesPerReal != self.hierarchy._bytesPerReal:
+            print "Paranoia Error: Cell_H and %s do not agree on bytes per real number." %self.filename
+            error_count += 1
+        if (bytesPerReal == self.hierarchy._bytesPerReal and dtype != self.hierarchy._dtype):
+            print "Paranoia Error: Cell_H and %s do not agree on endianness." %self.filename
+            error_count += 1
+
+        if error_count > 0:
+            raise RuntimeError("Paranoia unveiled %i differences between Cell_H and %s." % (error_count, self.filename))
+
+    else:
+        start = self.start
+        stop = self.stop
+        dtype = self.hierarchy._dtype
+        bytesPerReal = self.hierarchy._bytesPerReal
+        
+    nElements = self.ActiveDimensions.prod()
+
+    # one field has nElements*bytesPerReal bytes and is located
+    # nElements*bytesPerReal*field_index from the offset location
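+    # (e.g. for a 64^3 grid of 8-byte reals, the field with field_index 2 would
+    # begin 2 * 64**3 * 8 = 4,194,304 bytes past self._offset; illustrative only)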
+    if yt2orionFieldsDict.has_key(field):
+        fieldname = yt2orionFieldsDict[field]
+    else:
+        fieldname = field
+    field_index = self.field_indexes[fieldname]
+    inFile.seek(int(nElements*bytesPerReal*field_index),1)
+    field = na.fromfile(inFile,count=nElements,dtype=dtype)
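+    # BoxLib writes FAB data in Fortran (column-major) order, so reshape with the
+    # reversed dimensions and swap axes 0 and 2 to get the C-ordered array yt uses.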
+    field = field.reshape(self.ActiveDimensions[::-1]).swapaxes(0,2)
+
+    # we can/should also check against the max and min in the header file
+    
+    inFile.close()
+    return field
+    
+def readAllDataNative():
+    pass
+
+def readDataSliceNative(self, grid, field, axis, coord):
+    """
+    Reads a slice of `field` from `grid` along `axis` at index `coord` by
+    reading the full grid and slicing it.
+    """
+    sl = [slice(None), slice(None), slice(None)]
+    sl[axis] = slice(coord, coord + 1)
+    #sl = tuple(reversed(sl))
+    return grid.readDataFast(field)[sl]

Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Wed Dec  3 08:14:26 2008
@@ -36,6 +36,8 @@
          getExceptionHDF5, DataQueueHDF5),
      6: (readDataPacked, readAllDataPacked, getFieldsPacked, readDataSlicePacked,
          getExceptionHDF5, DataQueuePackedHDF5),
+     7: (readDataNative, readAllDataNative, None, readDataSliceNative,
+         getExceptionHDF5, None),
      8: (readDataInMemory, readAllDataInMemory, getFieldsInMemory, readDataSliceInMemory,
          getExceptionInMemory, DataQueueInMemory),
      'enzo_packed_2d': (readDataPacked, readAllDataPacked, getFieldsPacked, readDataSlicePacked2D,
@@ -1055,3 +1057,227 @@
                 yield line
     yield buf  # First line.
 
+class OrionHierarchy(AMRHierarchy):
+    def __init__(self,pf,data_style=7):
+        self.field_info = OrionFieldContainer()
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        header_filename = os.path.join(pf.fullplotdir,'Header')
+        self.directory = pf.fullpath
+        self.data_style = data_style
+        self._setup_classes()
+        self.readGlobalHeader(header_filename,self.parameter_file.paranoid_read) # also sets up the grid objects
+        self.__cache_endianness(self.levels[-1].grids[-1])
+        AMRHierarchy.__init__(self,pf)
+        self._setup_field_list()
+
+    def readGlobalHeader(self, filename, paranoid_read):
+        """
+        Reads the global 'Header' file of an Orion plotfile output and sets up
+        the grid objects.
+        """
+        counter = 0
+        header_file = open(filename,'r')
+        self.__global_header_lines = header_file.readlines()
+
+        # parse the file
+        self.orion_version = self.__global_header_lines[0].rstrip()
+        self.n_fields      = int(self.__global_header_lines[1])
+
+        counter = self.n_fields+2
+        for i,line in enumerate(self.__global_header_lines[2:counter]):
+            self.field_indexes[line.rstrip()] = i
+        self.field_list = []
+        for f in self.field_indexes:
+            self.field_list.append(orion2ytFieldsDict.get(f,f))
+
+        self.dimension = int(self.__global_header_lines[counter])
+        if self.dimension != 3:
+            raise RuntimeError("Orion must be in 3D to use yt.")
+        counter += 1
+        self.Time = float(self.__global_header_lines[counter])
+        counter += 1
+        self.finest_grid_level = int(self.__global_header_lines[counter])
+        self.n_levels = self.finest_grid_level + 1
+        counter += 1
+        self.domainLeftEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.domainRightEdge_unnecessary = na.array(map(float,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.refinementFactor_unnecessary = na.array(map(int,self.__global_header_lines[counter].split()))
+        counter += 1
+        self.globalIndexSpace_unnecessary = self.__global_header_lines[counter]
+        #domain_re.search(self.__global_header_lines[counter]).groups()
+        counter += 1
+        self.timestepsPerLevel_unnecessary = self.__global_header_lines[counter]
+        counter += 1
+        self.dx = na.zeros((self.n_levels,3))
+        for i,line in enumerate(self.__global_header_lines[counter:counter+self.n_levels]):
+            self.dx[i] = na.array(map(float,line.split()))
+        counter += self.n_levels
+        self.geometry = int(self.__global_header_lines[counter])
+        if self.geometry != 0:
+            raise RuntimeError("yt only supports Cartesian coordinates.")
+        counter += 1
+
+        # this is just to debug. eventually it should go away.
+        linebreak = int(self.__global_header_lines[counter])
+        if linebreak != 0:
+            raise RuntimeError("INTERNAL ERROR! This should be a zero.")
+        counter += 1
+
+        # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+        self.levels = []
+        grid_counter = 0
+        file_finder_pattern = r"FabOnDisk: (Cell_D_[0-9]{4}) (\d+)\n"
+        re_file_finder = re.compile(file_finder_pattern)
+        dim_finder_pattern = r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)\n"
+        re_dim_finder = re.compile(dim_finder_pattern)
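+        # The Cell_H lines these match look roughly like (illustrative values):
+        #   FabOnDisk: Cell_D_0000 0
+        #   ((0,0,0) (63,63,63) (0,0,0))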
+        
+        for level in range(0,self.n_levels):
+            tmp = self.__global_header_lines[counter].split()
+            # should this be grid_time or level_time??
+            lev,ngrids,grid_time = int(tmp[0]),int(tmp[1]),float(tmp[2])
+            counter += 1
+            nsteps = int(self.__global_header_lines[counter])
+            counter += 1
+            self.levels.append(OrionLevel(lev,ngrids))
+            # open level header, extract file names and offsets for
+            # each grid
+            fn = os.path.join(self.parameter_file.fullplotdir,'Level_%i'%level)
+            level_header_file = open(os.path.join(fn,'Cell_H'),'r').read()
+            grid_file_offset = re_file_finder.findall(level_header_file)
+            start_stop_index = re_dim_finder.findall(level_header_file)
+            for grid in range(0,ngrids):
+                gfn = os.path.join(fn,grid_file_offset[grid][0]) # filename of file containing this grid
+                gfo = int(grid_file_offset[grid][1]) # offset within that file
+                xlo,xhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                ylo,yhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                zlo,zhi = map(float,self.__global_header_lines[counter].split())
+                counter+=1
+                lo = na.array([xlo,ylo,zlo])
+                hi = na.array([xhi,yhi,zhi])
+                dims,start,stop = self.__calculate_grid_dimensions(start_stop_index[grid])
+                self.levels[-1].grids.append(self.grid(lo,hi,grid_counter,level,gfn, gfo, dims,start,stop,paranoia=paranoid_read))
+                grid_counter += 1 # this is global, and shouldn't be reset
+                                  # for each level
+            self.levels[-1]._fileprefix = self.__global_header_lines[counter]
+            counter+=1
+            self.num_grids = grid_counter
+            self.float_type = 'float64'
+
+        self.maxLevel = self.n_levels - 1 
+        self.max_level = self.n_levels - 1
+        header_file.close()
+
+    def __cache_endianness(self,test_grid):
+        """
+        Cache the endianness and bytes per real of the grids by using a
+        test grid and assuming that all grids have the same
+        endianness. This is a pretty safe assumption since Orion uses
+        one file per processor, and if you're running on a cluster
+        with different endian processors, then you're on your own!
+        """
+        # open the test file & grab the header
+        inFile = open(os.path.expanduser(test_grid.filename),'rb')
+        header = inFile.readline()
+        inFile.close()
+        header = header.strip()
+        
+        # parse it. the pattern is in OrionDefs.py
+        headerRe = re.compile(orion_FAB_header_pattern)
+        bytesPerReal,endian,start,stop,centerType,nComponents = headerRe.search(header).groups()
+        self._bytesPerReal = int(bytesPerReal)
+        if self._bytesPerReal == int(endian[0]):
+            dtype = '<'
+        elif self._bytesPerReal == int(endian[-1]):
+            dtype = '>'
+        else:
+            raise ValueError("FAB header is neither big nor little endian. Perhaps the file is corrupt?")
+
+        dtype += ('f%i' % self._bytesPerReal) # always a floating point
+        self._dtype = dtype
+
+    def __calculate_grid_dimensions(self,start_stop):
+        start = na.array(map(int,start_stop[0].split(',')))
+        stop = na.array(map(int,start_stop[1].split(',')))
+        dimension = stop - start + 1
+        return dimension,start,stop
+        
+
+    def _initialize_grids(self):
+        mylog.debug("Allocating memory for %s grids", self.num_grids)
+        self.gridDimensions = na.zeros((self.num_grids,3), 'int32')
+        self.gridStartIndices = na.zeros((self.num_grids,3), 'int32')
+        self.gridEndIndices = na.zeros((self.num_grids,3), 'int32')
+        self.gridTimes = na.zeros((self.num_grids,1), 'float64')
+        self.gridNumberOfParticles = na.zeros((self.num_grids,1))
+        mylog.debug("Done allocating")
+        mylog.debug("Creating grid objects")
+        self.grids = na.concatenate([level.grids for level in self.levels])
+        self.gridLevels = na.concatenate([level.ngrids*[level.level] for level in self.levels])
+        self.gridLevels = self.gridLevels.reshape((self.num_grids,1))
+        gridDcs = na.concatenate([level.ngrids*[self.dx[level.level]] for level in self.levels],axis=0)
+        self.gridDxs = gridDcs[:,0].reshape((self.num_grids,1))
+        self.gridDys = gridDcs[:,1].reshape((self.num_grids,1))
+        self.gridDzs = gridDcs[:,2].reshape((self.num_grids,1))
+        left_edges = []
+        right_edges = []
+        for level in self.levels:
+            left_edges += [g.LeftEdge for g in level.grids]
+            right_edges += [g.RightEdge for g in level.grids]
+        self.gridLeftEdge = na.array(left_edges)
+        self.gridRightEdge = na.array(right_edges)
+        self.gridReverseTree = [ [] for i in range(self.num_grids)]
+        self.gridTree = [ [] for i in range(self.num_grids)]
+        mylog.debug("Done creating grid objects")
+
+    def _populate_hierarchy(self):
+        self.__setup_grid_tree()
+        self._setup_grid_corners()
+        for i, grid in enumerate(self.grids):
+            if (i%1e4) == 0: mylog.debug("Prepared % 7i / % 7i grids", i, self.num_grids)
+            grid._prepare_grid()
+            grid._setup_dx()
+
+    def __setup_grid_tree(self):
+        for i, grid in enumerate(self.grids):
+            children = self._get_grid_children(grid)
+            for child in children:
+                self.gridReverseTree[child.id].append(i)
+                self.gridTree[i].append(weakref.proxy(child))
+
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        dd["field_indexes"] = self.field_indexes
+        self.grid = classobj("OrionGrid",(OrionGridBase,), dd)
+        AMRHierarchy._setup_classes(self, dd)
+
+    def _get_grid_children(self, grid):
+        mask = na.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
+        mask[grid_ind] = True
+        mask = na.logical_and(mask, (self.gridLevels == (grid.Level+1)).flat)
+        return self.grids[mask]
+
+    def _setup_field_list(self):
+        self.derived_field_list = []
+        for field in self.field_info:
+            try:
+                fd = self.field_info[field].get_dependencies(pf = self.parameter_file)
+            except:
+                continue
+            available = na.all([f in self.field_list for f in fd.requested])
+            if available: self.derived_field_list.append(field)
+        for field in self.field_list:
+            if field not in self.derived_field_list:
+                self.derived_field_list.append(field)
+
+class OrionLevel:
+    def __init__(self,level,ngrids):
+        self.level = level
+        self.ngrids = ngrids
+        self.grids = []
+    

Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Wed Dec  3 08:14:26 2008
@@ -6,7 +6,7 @@
 Affiliation: KIPAC/SLAC/Stanford
 Homepage: http://yt.enzotools.org/
 License:
-  Copyright (C) 2007-2008 Matthew Turk.  All Rights Reserved.
+  Copyright (C) 2007-2008 Matthew Turk, J. S. Oishi.  All Rights Reserved.
 
   This file is part of yt.
 
@@ -324,3 +324,137 @@
         for p, v in self.__conversion_override.items():
             self.conversion_factors[p] = v
 
+class OrionStaticOutput(StaticOutput):
+    """
+    A stripped-down output class that simply reads and parses the parameter
+    file, without looking at the Orion hierarchy.
+
+    @param filename: The filename of the parameter file we want to load
+    @type filename: String
+    """
+    _hierarchy_class = OrionHierarchy
+    _fieldinfo_class = OrionFieldContainer
+
+    def __init__(self, plotname, paramFilename='inputs', fparamFilename='probin',
+                 data_style=7, paranoia=False):
+        """Needs to override the default because of the Orion file structure.
+
+        The parameter file is usually called "inputs", and there may be a
+        Fortran inputs file, usually called "probin".  plotname here is a
+        directory name.  As per BoxLib, data_style will be one of:
+          Native
+          IEEE
+          ASCII
+        """
+        self.field_info = self._fieldinfo_class()
+        self.data_style = data_style
+        self.paranoid_read = paranoia
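+        # Expected on-disk layout (names inferred from this changeset; pltNNNN
+        # and run_directory are illustrative):
+        #   run_directory/
+        #       inputs         <- paramFilename
+        #       probin         <- fparamFilename (optional)
+        #       plt0010/       <- plotname
+        #           Header
+        #           Level_0/
+        #               Cell_H
+        #               Cell_D_0000, ...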
+        plotname = plotname.rstrip('/')
+        self.basename = os.path.basename(plotname)
+        # this will be the directory ENCLOSING the pltNNNN directory
+        self.directory = os.path.dirname(plotname)
+        self.parameter_filename = os.path.join(self.directory,paramFilename)
+        # fortran parameters
+        self.fparameters = {}
+        self.fparameter_filename = os.path.join(self.directory,fparamFilename)
+        self.fullpath = os.path.abspath(self.directory)
+        self.fullplotdir = os.path.abspath(plotname)
+        if len(self.directory) == 0:
+            self.directory = "."
+        self.conversion_factors = {}
+        self.parameters = {}
+        self._parse_parameter_file()
+        if os.path.isfile(self.fparameter_filename):
+            self._parse_fparameter_file()
+        self._set_units()
+        
+        # These should maybe not be hardcoded?
+        self.parameters["HydroMethod"] = 'orion' # always PPM DE
+        self.parameters["InitialTime"] = 0. # FIX ME!!!
+        self.parameters["DualEnergyFormalism"] = 0 # always off.
+        if self.fparameters.has_key("mu"):
+            self.parameters["mu"] = self.fparameters["mu"]
+        
+    def _parse_parameter_file(self):
+        """
+        Parses the parameter file and establishes the various
+        dictionaries.
+        """
+        # Let's read the file
+        self.parameters["CurrentTimeIdentifier"] = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        lines = open(self.parameter_filename).readlines()
+        for lineI, line in enumerate(lines):
+            if line.find("#") >= 1: # strip trailing comments
+                line=line[:line.find("#")]
+            line=line.strip().rstrip()
+            if len(line) < 2 or line.find("#") == 0: # skip blanks and full-line comments
+                continue
+            try:
+                param, vals = map(strip,map(rstrip,line.split("=")))
+            except ValueError:
+                mylog.error("ValueError: '%s'", line)
+            if orion2enzoDict.has_key(param):
+                paramName = orion2enzoDict[param]
+                t = map(parameterDict[paramName], vals.split())
+                if len(t) == 1:
+                    self.parameters[paramName] = t[0]
+                else:
+                    self.parameters[paramName] = t
+            elif param.startswith("geometry.prob_hi"):
+                self.parameters["DomainRightEdge"] = \
+                    na.array([float(i) for i in vals.split()])
+            elif param.startswith("geometry.prob_lo"):
+                self.parameters["DomainLeftEdge"] = \
+                    na.array([float(i) for i in vals.split()])
+
+    def _parse_fparameter_file(self):
+        """
+        Parses the fortran parameter file for Orion. Most of this will
+        be useless, but this is where it keeps mu = mass per
+        particle/m_hydrogen.
+        """
+        lines = open(self.fparameter_filename).readlines()
+        for line in lines:
+            if line.count("=") == 1:
+                param, vals = map(strip,map(rstrip,line.split("=")))
+                if vals.count("'") == 0:
+                    t = map(float,[a.replace('D','e').replace('d','e') for a in vals.split()]) # all are floating point.
+                else:
+                    t = vals.split()
+                if len(t) == 1:
+                    self.fparameters[param] = t[0]
+                else:
+                    self.fparameters[param] = t
+                
+                
+    def _set_units(self):
+        """
+        Generates the conversions to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        seconds = 1 #self["Time"]
+        self.time_units['years'] = seconds / (365*3600*24.0)
+        self.time_units['days']  = seconds / (3600*24.0)
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Wed Dec  3 08:14:26 2008
@@ -77,6 +77,7 @@
 import PointCombine
 import HDF5LightReader
 from EnzoDefs import *
+from OrionDefs import *
 
 from ParallelTools import *
 # Now our fields
@@ -84,7 +85,7 @@
 from FieldInfoContainer import *
 from UniversalFields import *
 from EnzoFields import *
-
+from OrionFields import *
 fieldInfo = EnzoFieldInfo
 
 from DerivedQuantities import DerivedQuantityCollection, GridChildMaskWrapper

Modified: trunk/yt/mods.py
==============================================================================
--- trunk/yt/mods.py	(original)
+++ trunk/yt/mods.py	Wed Dec  3 08:14:26 2008
@@ -41,7 +41,8 @@
 from yt.lagos import EnzoStaticOutput, \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     add_field, FieldInfo, EnzoFieldInfo, Enzo2DFieldInfo, OrionFieldInfo, \
-    Clump, write_clump_hierarchy, find_clumps, write_clumps
+    Clump, write_clump_hierarchy, find_clumps, write_clumps, \
+    OrionStaticOutput
 
 # This is a temporary solution -- in the future, we will allow the user to
 # select this via ytcfg.



More information about the yt-svn mailing list