[Yt-svn] yt-commit r1806 - in trunk: . yt yt/_amr_utils yt/extensions yt/extensions/volume_rendering yt/lagos yt/ramses_headers yt/raven

mturk at wrangler.dreamhost.com
Tue Aug 10 16:02:12 PDT 2010


Author: mturk
Date: Tue Aug 10 16:02:10 2010
New Revision: 1806
URL: http://yt.enzotools.org/changeset/1806

Log:
Backporting from hg.

This includes support for three new astrophysical simulation codes: RAMSES and
FLASH, along with nascent support for Gadget.  It includes contributions from
Chris Moody, John ZuHone and Oliver Hahn.

John Wise has also added a streamline integrator.

It also includes bug fixes for volume rendering, grid homogenization and EPS
writing, as well as support for grid reflection in volume rendering.

Documentation for these new codes is forthcoming, as not all features are yet
supported for all codes.
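
For illustration, a minimal sketch of loading the new output types (the file
names and import paths below are assumptions, not part of this commit):

    from yt.mods import FLASHStaticOutput
    from yt.lagos import RAMSESStaticOutput

    # FLASH checkpoint files are HDF5 files containing a "bounding box" dataset.
    pf_flash = FLASHStaticOutput("flash_hdf5_chk_0000")            # hypothetical file
    # RAMSES outputs are identified by their info_*.txt file, with the
    # matching amr_*.out00001 file alongside it.
    pf_ramses = RAMSESStaticOutput("output_00001/info_00001.txt")  # hypothetical path

    print pf_flash.h.num_grids, pf_ramses.h.num_grids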



Added:
   trunk/yt/lagos/FLASHFields.py
   trunk/yt/lagos/RAMSESFields.py
   trunk/yt/ramses_headers/
   trunk/yt/ramses_headers/FortranUnformatted_IO.hh
   trunk/yt/ramses_headers/RAMSES_amr_data.hh
   trunk/yt/ramses_headers/RAMSES_hydro_data.hh
   trunk/yt/ramses_headers/RAMSES_info.hh
   trunk/yt/ramses_headers/RAMSES_particle_data.hh
   trunk/yt/ramses_headers/RAMSES_poisson_data.hh
   trunk/yt/ramses_headers/RAMSES_typedefs.h
   trunk/yt/ramses_headers/README
   trunk/yt/ramses_headers/data_iterators.hh
   trunk/yt/ramses_reader.pyx
Modified:
   trunk/CREDITS
   trunk/yt/_amr_utils/VolumeIntegrator.pyx
   trunk/yt/extensions/eps_writer.py
   trunk/yt/extensions/volume_rendering/grid_partitioner.py
   trunk/yt/lagos/BaseDataTypes.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/DataReadingFuncs.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/__init__.py
   trunk/yt/mods.py
   trunk/yt/raven/Callbacks.py
   trunk/yt/setup.py

Modified: trunk/CREDITS
==============================================================================
--- trunk/CREDITS	(original)
+++ trunk/CREDITS	Tue Aug 10 16:02:10 2010
@@ -8,10 +8,15 @@
                                 Devin Silvia (devin.silvia at gmail.com)
                                 John Wise (jwise at astro.princeton.edu)
                                 David Collins (dcollins at physics.ucsd.edu)
+                                Christopher Moody (cemoody at ucsc.edu)
+                                Oliver Hahn (ohahn at stanford.edu)
+                                John ZuHone (jzuhone at cfa.harvard.edu)
 
 We also include the Delaunay Triangulation module written by Robert Kern of
 Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
 Nilton Volpato.  The PasteBin interface code (as well as the PasteBin itself)
-was written by the Pocoo collective (pocoo.org).
+was written by the Pocoo collective (pocoo.org).  The RamsesRead++ library was
+developed by Oliver Hahn.  Large parts of this code were guided by discussions
+with Tom Abel, Ralf Kaehler, Mike Norman and Greg Bryan.
 
 Thanks to everyone for all your contributions!

Modified: trunk/yt/_amr_utils/VolumeIntegrator.pyx
==============================================================================
--- trunk/yt/_amr_utils/VolumeIntegrator.pyx	(original)
+++ trunk/yt/_amr_utils/VolumeIntegrator.pyx	Tue Aug 10 16:02:10 2010
@@ -106,9 +106,9 @@
     cdef np.float64_t bv, dy, dd, tf
     cdef int bin_id
     if fit.pass_through == 1: return dvs[fit.field_id]
+    if dvs[fit.field_id] > fit.bounds[1] or dvs[fit.field_id] < fit.bounds[0]: return 0.0
     bin_id = <int> ((dvs[fit.field_id] - fit.bounds[0]) * fit.idbin)
     dd = dvs[fit.field_id] - (fit.bounds[0] + bin_id * fit.dbin) # x - x0
-    if bin_id > fit.nbins - 2 or bin_id < 0: return 0.0
     bv = fit.values[bin_id]
     dy = fit.values[bin_id + 1] - bv
     if fit.weight_field_id != -1:
@@ -411,6 +411,12 @@
         for i in range(3):
             if (v_dir[i] < 0):
                 step[i] = -1
+            elif (v_dir[i] == 0):
+                step[i] = 1
+                tmax[i] = 1e60
+                iv_dir[i] = 1e60
+                tdelta[i] = 1e-60
+                continue
             else:
                 step[i] = 1
             x = (i+1) % 3

Modified: trunk/yt/extensions/eps_writer.py
==============================================================================
--- trunk/yt/extensions/eps_writer.py	(original)
+++ trunk/yt/extensions/eps_writer.py	Tue Aug 10 16:02:10 2010
@@ -316,6 +316,8 @@
         >>> d.save_fig()
         """
         image = pyx.bitmap.jpegimage(filename)
+        if self.canvas is None:
+            self.canvas = pyx.canvas.canvas()
         self.canvas.insert(pyx.bitmap.bitmap(pos[0], pos[1], image,
                                              compressmode=None,
                                              width=self.figsize[0],
@@ -695,7 +697,8 @@
 def multiplot(ncol, nrow, yt_plots=None, images=None, xranges=None,
               yranges=None, xlabels=None, ylabels=None, colorbars=None,
               shrink_cb=0.95, figsize=(8,8), margins=(0,0), titles=None,
-              savefig=None, yt_nocbar=False, bare_axes=False):
+              savefig=None, yt_nocbar=False, bare_axes=False,
+              cb_flags=None):
     r"""Convenience routine to create a multi-panel figure from yt plots or
     JPEGs.  The images are first placed from the origin, and then
     bottom-to-top and left-to-right.
@@ -736,6 +739,8 @@
     bare_axes : boolean
         Set to true to have no annotations or tick marks on all of the
         axes.
+    cb_flags : list of booleans
+        Flags for each plot to have a colorbar or not.
 
     Examples
     --------
@@ -747,7 +752,8 @@
     >>> cbs.append(return_cmap("hot", r"Entropy [K cm$^2$]", (1e-2,1e6), True))
     >>> cbs.append(return_cmap("Spectral", "Stuff$_x$!", (1,300), True))
     >>> 
-    >>> mp = multiplot(images,2,2, margins=(0.1,0.1), titles=["1","2","3","4"],
+    >>> mp = multiplot(2,2, images=images, margins=(0.1,0.1),
+    >>>                titles=["1","2","3","4"],
     >>>                xlabels=["one","two"], ylabels=None, colorbars=cbs,
     >>>                shrink_cb=0.95)
     >>> mp.scale_line(label="$r_{vir}$", labelloc="top")
@@ -771,6 +777,8 @@
         print "Given both images and yt plots.  Ignoring images."
     if yt_plots != None:
         _yt = True
+    else:
+        _yt = False
 
     # If no ranges or labels given and given only images, fill them in.
     if not _yt:
@@ -820,7 +828,7 @@
             if titles != None:
                 if titles[index] != None:
                     d.title_box(titles[index],
-                                loc=(i+0.02+i*margins[0]/figsize[0],
+                                loc=(i+0.05+i*margins[0]/figsize[0],
                                      j+0.98+j*margins[1]/figsize[1]))
 
     # Insert colorbars after all axes are placed because we want to
@@ -835,6 +843,9 @@
             xpos0 = i*(figsize[0] + margins[0])
             index = j*ncol + i
             if (not _yt and colorbars != None) or (_yt and not yt_nocbar):
+                if cb_flags != None:
+                    if cb_flags[index] == False:
+                        continue
                 if _yt or colorbars[index] != None:
                     if ncol == 1:
                         orientation = "right"

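A sketch of the new cb_flags option in use (the "plots" list here is
hypothetical; everything else follows the docstring example above):

    from yt.extensions.eps_writer import multiplot

    # Suppress the colorbar on the second and fourth panels only.
    mp = multiplot(2, 2, yt_plots=plots, margins=(0.1, 0.1),
                   titles=["1", "2", "3", "4"],
                   cb_flags=[True, False, True, False])
    mp.save_fig()
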
Modified: trunk/yt/extensions/volume_rendering/grid_partitioner.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/grid_partitioner.py	(original)
+++ trunk/yt/extensions/volume_rendering/grid_partitioner.py	Tue Aug 10 16:02:10 2010
@@ -55,13 +55,17 @@
         self.log_fields = log_fields
 
     def traverse(self, back_point, front_point):
+        mylog.info("Traversing %s bricks between %s and %s",
+                   len(self.bricks), back_point, front_point)
         if self.bricks is None: self.initialize_source()
         vec = front_point - back_point
         dist = na.minimum(
              na.sum((self.brick_left_edges - back_point) * vec, axis=1),
              na.sum((self.brick_right_edges - back_point) * vec, axis=1))
         ind = na.argsort(dist)
-        for b in self.bricks[ind]: yield b
+        for b in self.bricks[ind]:
+            #print b.LeftEdge, b.RightEdge
+            yield b
 
     def _partition_grid(self, grid):
 
@@ -100,6 +104,9 @@
             bricks += self._partition_grid(g)
         pbar.finish()
         bricks = na.array(bricks, dtype='object')
+        self.initialize_bricks(bricks)
+
+    def initialize_bricks(self, bricks):
         NB = len(bricks)
         # Now we set up our (local for now) hierarchy.  Note that to calculate
         # intersection, we only need to do the left edge & right edge.
@@ -118,6 +125,23 @@
         self.brick_dimensions -= 1
         self.bricks = na.array(bricks, dtype='object')
 
+    def reflect_across_boundaries(self):
+        mylog.warning("Note that this doesn't fix ghost zones, so there may be artifacts at domain boundaries!")
+        nb = []
+        # Simplest, clearest iteration ...
+        for i in [-1, 1]:
+            for j in [-1, 1]:
+                for k in [-1, 1]:
+                    for b in self.bricks:
+                        BB = na.array([b.LeftEdge * [i,j,k], b.RightEdge * [i,j,k]])
+                        LE, RE = na.min(BB, axis=0), na.max(BB, axis=0)
+                        nb.append(
+                            PartitionedGrid(b.parent_grid_id, len(b.my_data), 
+                                [md[::i,::j,::k].copy("C") for md in b.my_data],
+                                LE, RE, na.array(b.my_data[0].shape) - 1))
+        # Replace old bricks
+        self.initialize_bricks(nb)
+
     def store_bricks(self, fn):
         import h5py, cPickle
         f = h5py.File(fn, "w")

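A sketch of where the new reflection step might fit in a rendering script
(the "source" object and camera vectors below are assumptions standing in for
the partitioned-grid volume defined in this file):

    source.initialize_source()           # partition the grids into bricks
    source.reflect_across_boundaries()   # mirror bricks across all three axes
    for brick in source.traverse(back_point, front_point):
        # bricks come back sorted for back-to-front compositing
        pass
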
Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py	(original)
+++ trunk/yt/lagos/BaseDataTypes.py	Tue Aug 10 16:02:10 2010
@@ -2244,6 +2244,7 @@
         """
         AMR3DData.__init__(self, center, fields, pf, **kwargs)
         self._grids = na.array(grid_list)
+        self.grid_list = self._grids
 
     def _get_list_of_grids(self):
         pass

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Tue Aug 10 16:02:10 2010
@@ -60,7 +60,8 @@
         if self.start_index != None:
             return self.start_index
         if self.Parent == None:
-            start_index = self.LeftEdge / self.dds
+            iLE = self.LeftEdge - self.pf["DomainLeftEdge"]
+            start_index = iLE / self.dds
             return na.rint(start_index).astype('int64').ravel()
         pdx = self.Parent.dds
         start_index = (self.Parent.get_global_startindex()) + \
@@ -580,79 +581,53 @@
     def __repr__(self):
         return "OrionGrid_%04i" % (self.id)
 
-class ProtoGadgetGrid(object):
-    def __init__(self, level, left_edge, right_edge, particles, parent = None):
-        # generate the left/right edge from the octant and the level argument
-        self.level = level
-        self.left_edge = left_edge
-        self.right_edge = right_edge
-        self.particles = particles
-        self.parent = parent # We break convention here -- this is a protogrid
-        self.children = []
-
-    def split(self):
-        #split_axis = (self.left_edge + self.right_edge) / 2.0
-        W = 0.5 * (self.right_edge - self.left_edge) # This is the half-width
-        keep = na.ones(self.particles.shape[1], dtype='bool')
-        for i in xrange(2):
-            for j in xrange(2):
-                for k in xrange(2):
-                    LE = self.left_edge.copy()
-                    LE += na.array([i, j, k], dtype='float64') * W
-                    RE = LE + W
-                    pi = self.select_particles(LE, RE)
-                    keep &= (~pi)
-                    g = ProtoGadgetGrid(self.level + 1, LE, RE,
-                                        self.particles[:,pi], self)
-                    self.children.append(g)
-                    yield g
-        self.particles = self.particles[:,keep]
-
-    def select_particles(self, LE, RE):
-        pi = na.all( (self.particles >= LE[:,None])
-                   & (self.particles <  RE[:,None]), axis=0)
-        return pi
-
-    def refine(self, npart = 128):
-        gs = [self]
-        if self.particles.shape[1] < npart: return gs
-        for grid in self.split():
-            gs += grid.refine(npart)
-        return gs
-
 class GadgetGrid(AMRGridPatch):
 
     _id_offset = 0
 
-    def __init__(self, id, hierarchy, proto_grid):
+    def __init__(self, id, filename, hierarchy, **kwargs):
         AMRGridPatch.__init__(self, id, hierarchy = hierarchy)
-        self.Children = []
-        if proto_grid.parent is None:
-            self.Parent = None
-        else:
-            self.Parent = proto_grid.parent.real_grid
-            self.Parent.Children.append(self)
-        self.Level = proto_grid.level
-        self.LeftEdge = proto_grid.left_edge
-        self.RightEdge = proto_grid.right_edge
-        self.NumberOfParticles = proto_grid.particles.shape[1]
-        self._storage = {}
-        self._storage['particle_position_x'] = proto_grid.particles[0,:]
-        self._storage['particle_position_y'] = proto_grid.particles[1,:]
-        self._storage['particle_position_z'] = proto_grid.particles[2,:]
-        # Something should be done here for the volume change as you go down
-        # the hierarchy...
-        self._storage['particle_mass'] = na.ones(self.NumberOfParticles,
-                                           dtype='float64') * (8 ** self.Level)
-        # Our dx is a bit fluid here, so we defer
-        dims = self.pf["TopGridDimensions"]
-        # Hard code to refineby 2
-        dds = 1.0 / (dims * 2**self.Level)
-        ad = na.rint((self.RightEdge - self.LeftEdge) / dds)
-        self.ActiveDimensions = ad.astype('int64')
+        self.id = id
+        self.filename = filename
+        self.Children = [] #grid objects
+        self.Parent = None
+        self.Level = 0
+        self.LeftEdge = [0,0,0]
+        self.RightEdge = [0,0,0]
+        self.IsLeaf = False
+        self.N = 0
+        self.Address = ''
+        self.NumberOfParticles = self.N
+        self.ActiveDimensions = [0,0,0]
+        self._id_offset = 0
+        
+        for key,val in kwargs.items():
+            if key in dir(self):
+                #if it's one of the predefined values
+                setattr(self,key,val)
+        
+    #def __repr__(self):
+    #    return "GadgetGrid_%04i" % (self.Address)
+    
+    def _prepare_grid(self):
+        #all of this info is already included in the snapshots
+        pass
+        #h = self.hierarchy
+        #h.grid_levels[self.Address,0]=self.Level
+        #h.grid_left_edge[self.Address,:]=self.LeftEdge[:]
+        #h.grid_right_edge[self.Address,:]=self.RightEdge[:]
+    
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id
+        LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+        self.dds = na.array((RE-LE)/self.ActiveDimensions)
+        if self.pf["TopGridRank"] < 2: self.dds[1] = 1.0
+        if self.pf["TopGridRank"] < 3: self.dds[2] = 1.0
+        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
 
-    def __repr__(self):
-        return "GadgetGrid_%04i" % (self.id)
 
 class ChomboGrid(AMRGridPatch):
     _id_offset = 0
@@ -705,3 +680,61 @@
     def __repr__(self):
         return "TigerGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
 
+
+class FLASHGrid(AMRGridPatch):
+    _id_offset = 1
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Parent = None
+        self.Children = []
+        self.Level = level
+
+    def __repr__(self):
+        return "FLASHGrid_%04i (%s)" % (self.id, self.ActiveDimensions)
+
+class RAMSESGrid(AMRGridPatch):
+    _id_offset = 0
+    #__slots__ = ["_level_id", "stop_index"]
+    def __init__(self, id, hierarchy, level, locations, start_index):
+        AMRGridPatch.__init__(self, id, filename = hierarchy.hierarchy_filename,
+                              hierarchy = hierarchy)
+        self.Level = level
+        self.Parent = []
+        self.Children = []
+        self.locations = locations
+        self.start_index = start_index.copy()
+
+    def _setup_dx(self):
+        # So first we figure out what the index is.  We don't assume
+        # that dx=dy=dz , at least here.  We probably do elsewhere.
+        id = self.id - self._id_offset
+        if len(self.Parent) > 0:
+            self.dds = self.Parent[0].dds / self.pf["RefineBy"]
+        else:
+            LE, RE = self.hierarchy.grid_left_edge[id,:], \
+                     self.hierarchy.grid_right_edge[id,:]
+            self.dds = na.array((RE-LE)/self.ActiveDimensions)
+        if self.pf["TopGridRank"] < 2: self.dds[1] = 1.0
+        if self.pf["TopGridRank"] < 3: self.dds[2] = 1.0
+        self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+
+    def get_global_startindex(self):
+        """
+        Return the integer starting index for each dimension at the current
+        level.
+        """
+        if self.start_index != None:
+            return self.start_index
+        if len(self.Parent) == 0:
+            start_index = self.LeftEdge / self.dds
+            return na.rint(start_index).astype('int64').ravel()
+        pdx = self.Parent[0].dds
+        start_index = (self.Parent[0].get_global_startindex()) + \
+                       na.rint((self.LeftEdge - self.Parent[0].LeftEdge)/pdx)
+        self.start_index = (start_index*self.pf["RefineBy"]).astype('int64').ravel()
+        return self.start_index
+
+    def __repr__(self):
+        return "RAMSESGrid_%04i (%s)" % (self.id, self.ActiveDimensions)

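For reference, a toy version of the root-grid start-index computation added
above (the edge and spacing values are made up):

    import numpy as na

    LeftEdge       = na.array([0.25, 0.25, 0.25])
    DomainLeftEdge = na.array([0.0, 0.0, 0.0])
    dds            = na.array([1.0/32, 1.0/32, 1.0/32])
    # Root grids index from the domain left edge, not from zero.
    start_index = na.rint((LeftEdge - DomainLeftEdge) / dds).astype('int64')
    print start_index   # [8 8 8]
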
Modified: trunk/yt/lagos/DataReadingFuncs.py
==============================================================================
--- trunk/yt/lagos/DataReadingFuncs.py	(original)
+++ trunk/yt/lagos/DataReadingFuncs.py	Tue Aug 10 16:02:10 2010
@@ -25,6 +25,7 @@
 from yt.lagos import *
 import yt.amr_utils as au
 import exceptions
+import cPickle
 
 _axis_ids = {0:2,1:1,2:0}
 
@@ -426,10 +427,27 @@
         return t
 
 class IOHandlerGadget(BaseIOHandler):
-    _data_style = 'gadget_binary'
+    _data_style = 'gadget_hdf5'
     def _read_data_set(self, grid, field):
-        return grid._storage[field]
-
+        adr = grid.Address
+        fh = h5py.File(grid.filename,mode='r')
+        if 'particles' in fh[adr].keys():
+            adr2 = adr+'/particles'
+            return fh[adr2][field]
+        return None
+    def _read_field_names(self,grid): 
+        adr = grid.Address
+        fh = h5py.File(grid.filename,mode='r')
+        rets = cPickle.loads(fh['/root'].attrs['fieldnames'])
+        return rets
+
+    def _read_data_slice(self,grid, field, axis, coord):
+        adr = grid.Address
+        fh = h5py.File(grid.filename,mode='r')
+        if 'particles' in fh[adr].keys():
+            adr2 = adr+'/particles'
+            return fh[adr2][field][coord,axis]
+        return None
 #
 # Chombo readers
 #
@@ -499,3 +517,50 @@
         SS[axis] = 1
         data = au.read_tiger_section(fn, LD, SS, RS).astype("float64")
         return data
+
+class IOHandlerFLASH(BaseIOHandler):
+    _data_style = "flash_hdf5"
+
+    def __init__(self, *args, **kwargs):
+        BaseIOHandler.__init__(self, *args, **kwargs)
+
+    def _read_data_set(self, grid, field):
+        f = h5py.File(grid.pf.parameter_filename, "r")
+        tr = f["/%s" % field][grid.id - grid._id_offset,:,:,:].transpose()
+        return tr.astype("float64")
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        f = h5py.File(grid.pf.parameter_filename, "r")
+        tr = f["/%s" % field][grid.id - grid._id_offset].transpose()[sl]
+        return tr.astype("float64")
+
+class IOHandlerRAMSES(BaseIOHandler):
+    _data_style = "ramses"
+
+    def __init__(self, ramses_tree, *args, **kwargs):
+        self.ramses_tree = ramses_tree
+        BaseIOHandler.__init__(self, *args, **kwargs)
+
+    def _read_data_set(self, grid, field):
+        tr = na.zeros(grid.ActiveDimensions, dtype='float64')
+        filled = na.zeros(grid.ActiveDimensions, dtype='int32')
+        to_fill = grid.ActiveDimensions.prod()
+        grids = [grid]
+        l_delta = 0
+        while to_fill > 0 and len(grids) > 0:
+            next_grids = []
+            for g in grids:
+                to_fill -= self.ramses_tree.read_grid(field,
+                        grid.get_global_startindex(), grid.ActiveDimensions,
+                        tr, filled, g.Level, 2**l_delta, g.locations)
+                next_grids += g.Parent
+            grids = next_grids
+            l_delta += 1
+        return tr
+
+    def _read_data_slice(self, grid, field, axis, coord):
+        sl = [slice(None), slice(None), slice(None)]
+        sl[axis] = slice(coord, coord + 1)
+        return self._read_data_set(grid, field)[sl]

Added: trunk/yt/lagos/FLASHFields.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/FLASHFields.py	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,97 @@
+"""
+FLASH-specific fields
+
+Author: J. S. Oishi <jsoishi at gmail.com>
+Affiliation: UC Berkeley
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2009 J. S. Oishi, Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from UniversalFields import *
+class FLASHFieldContainer(CodeFieldInfoContainer):
+    _shared_state = {}
+    _field_list = {}
+FLASHFieldInfo = FLASHFieldContainer()
+add_flash_field = FLASHFieldInfo.add_field
+
+add_field = add_flash_field
+
+# Common fields in FLASH: (Thanks to John ZuHone for this list)
+#
+# dens gas mass density (g/cc) --
+# eint internal energy (ergs/g) --
+# ener total energy (ergs/g), with 0.5*v^2 --
+# gamc gamma defined as ratio of specific heats, no units
+# game gamma defined as in , no units
+# gpol gravitational potential from the last timestep (ergs/g)
+# gpot gravitational potential from the current timestep (ergs/g)
+# grac gravitational acceleration from the current timestep (cm s^-2)
+# pden particle mass density (usually dark matter) (g/cc)
+# pres pressure (erg/cc)
+# temp temperature (K) --
+# velx velocity x (cm/s) --
+# vely velocity y (cm/s) --
+# velz velocity z (cm/s) --
+
+translation_dict = {"x-velocity": "velx",
+                    "y-velocity": "vely",
+                    "z-velocity": "velz",
+                    "Density": "dens",
+                    "Total_Energy": "ener",
+                    "Gas_Energy": "eint",
+                    "Temperature": "temp",
+                   }
+
+def _generate_translation(mine, theirs):
+    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
+
+for f,v in translation_dict.items():
+    if v not in FLASHFieldInfo:
+        add_field(v, function=lambda a,b: None, take_log=False,
+                  validators = [ValidateDataField(v)])
+    #print "Setting up translator from %s to %s" % (v, f)
+    _generate_translation(v, f)
+
+add_field("gamc", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("gamc")],
+          units = r"\rm{ratio\/of\/specific\/heats}")
+
+add_field("game", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("game")],
+          units = r"\rm{ratio\/of\/specific\/heats}")
+
+add_field("gpot", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("gpot")],
+          units = r"\rm{ergs\//\/g}")
+
+add_field("gpot", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("gpol")],
+          units = r"\rm{ergs\//\/g}")
+
+add_field("grac", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("grac")],
+          units = r"\rm{cm\/s^{-2}}")
+
+add_field("pden", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("pden")],
+          units = r"\rm{g}\//\/\rm{cm}^{3}")
+
+add_field("pres", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("pres")],
+          units = r"\rm{erg}\//\/\rm{cm}^{3}")

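A sketch of the field translation in practice (the file name is hypothetical;
the data-object calls assume the standard hierarchy API):

    pf = FLASHStaticOutput("flash_hdf5_chk_0000")
    value, center = pf.h.find_max("Density")   # "Density" resolves to the FLASH "dens" field
    sp = pf.h.sphere(center, 0.05)             # radius in code units
    print sp["Temperature"].mean()             # translated from the FLASH "temp" field
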
Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Tue Aug 10 16:02:10 2010
@@ -25,9 +25,20 @@
 
 from yt.lagos import *
 from yt.funcs import *
-import string, re, gc, time
-import cPickle
+import string, re, gc, time, cPickle, pdb
 from itertools import chain, izip
+try:
+    import yt.ramses_reader as ramses_reader
+except ImportError:
+    mylog.warning("Ramses Reader not imported")
+
+def num_deep_inc(f):
+    def wrap(self, *args, **kwargs):
+        self.num_deep += 1
+        rv = f(self, *args, **kwargs)
+        self.num_deep -= 1
+        return rv
+    return wrap
 
 class AMRHierarchy(ObjectFindingMixin, ParallelAnalysisInterface):
     float_type = 'float64'
@@ -1190,9 +1201,108 @@
         self.ngrids = ngrids
         self.grids = []
     
-
 class GadgetHierarchy(AMRHierarchy):
+    grid = GadgetGrid
+
+    def __init__(self, pf, data_style='gadget_hdf5'):
+        self.field_info = GadgetFieldContainer()
+        self.directory = os.path.dirname(pf.parameter_filename)
+        self.data_style = data_style
+        self._handle = h5py.File(pf.parameter_filename)
+        AMRHierarchy.__init__(self, pf, data_style)
+        self._handle.close()
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        #example string:
+        #"(S'VEL'\np1\nS'ID'\np2\nS'MASS'\np3\ntp4\n."
+        #fields are surrounded with '
+        fields_string=self._handle['root'].attrs['names']
+        #splits=fields_string.split("'")
+        #pick out the odd fields
+        #fields= [splits[j] for j in range(1,len(splits),2)]
+        self.field_list = cPickle.loads(fields_string)
+    
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        fh = self._handle #shortcut
+        #nodes in the hdf5 file are the same as grids
+        #in yt
+        #the num of levels and total nodes is already saved
+        self._levels   = self.pf._get_param('maxlevel')
+        self.num_grids = self.pf._get_param('numnodes')
+        
+    def _parse_hierarchy(self):
+        #for every box, define a self.grid(level,edge1,edge2) 
+        #with particle counts, dimensions
+        f = self._handle #shortcut
+        
+        root = f['root']
+        grids,numnodes = self._walk_nodes(None,root,[])
+        dims = [self.pf.max_grid_size for grid in grids]
+        LE = [grid.LeftEdge for grid in grids]
+        RE = [grid.RightEdge for grid in grids]
+        levels = [grid.Level for grid in grids]
+        counts = [(grid.N if grid.IsLeaf else 0) for grid in grids]
+        self.grids = na.array(grids,dtype='object')
+        self.grid_dimensions[:] = na.array(dims, dtype='int64')
+        self.grid_left_edge[:] = na.array(LE, dtype='float64')
+        self.grid_right_edge[:] = na.array(RE, dtype='float64')
+        self.grid_levels.flat[:] = na.array(levels, dtype='int32')
+        self.grid_particle_count.flat[:] = na.array(counts, dtype='int32')
+            
+    def _walk_nodes(self,parent,node,grids,idx=0):
+        pi = cPickle.loads
+        loc = node.attrs['h5address']
+        
+        kwargs = {}
+        kwargs['Address'] = loc
+        kwargs['Children'] = [ch for ch in node.values()]
+        kwargs['Parent'] = parent
+        kwargs['Level']  = self.pf._get_param('level',location=loc)
+        kwargs['LeftEdge'] = self.pf._get_param('leftedge',location=loc) 
+        kwargs['RightEdge'] = self.pf._get_param('rightedge',location=loc)
+        kwargs['IsLeaf'] = self.pf._get_param('isleaf',location=loc)
+        kwargs['N'] = self.pf._get_param('n',location=loc)
+        kwargs['NumberOfParticles'] = self.pf._get_param('n',location=loc)
+        dx = self.pf._get_param('dx',location=loc)
+        dy = self.pf._get_param('dy',location=loc)
+        dz = self.pf._get_param('dz',location=loc)
+        kwargs['ActiveDimensions'] = (dx,dy,dz)
+        grid = self.grid(idx,self.pf.parameter_filename,self,**kwargs)
+        idx+=1
+        grids += [grid,]
+        #pdb.set_trace()
+        if kwargs['IsLeaf']:
+            return grids,idx
+        else:
+            for child in node.values():
+                grids,idx=self._walk_nodes(node,child,grids,idx=idx)
+        return grids,idx
+    
+    def _populate_grid_objects(self):
+        for g in self.grids:
+            g._prepare_grid()
+        self.max_level = self._handle['root'].attrs['maxlevel']
+    
+    def _setup_unknown_fields(self):
+        pass
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _get_grid_children(self, grid):
+        #given a grid, use it's address to find subchildren
+        pass
 
+class GadgetHierarchyOld(AMRHierarchy):
+    #Kept here to compare for the time being
     grid = GadgetGrid
 
     def __init__(self, pf, data_style):
@@ -1412,3 +1522,271 @@
 
     def _setup_derived_fields(self):
         self.derived_field_list = []
+
+class FLASHHierarchy(AMRHierarchy):
+
+    grid = FLASHGrid
+    _handle = None
+    
+    def __init__(self,pf,data_style='chombo_hdf5'):
+        self.data_style = data_style
+        self.field_info = FLASHFieldContainer()
+        self.field_indexes = {}
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self._handle = h5py.File(self.hierarchy_filename)
+
+        self.float_type = na.float64
+        AMRHierarchy.__init__(self,pf,data_style)
+
+        self._handle.close()
+        self._handle = None
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        ncomp = self._handle["/unknown names"].shape[0]
+        self.field_list = [s.strip() for s in self._handle["/unknown names"][:].flat]
+    
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        try:
+            self.num_grids = self.parameter_file._find_parameter(
+                "integer", "globalnumblocks", True, self._handle)
+        except KeyError:
+            self.num_grids = self._handle["/simulation parameters"][0][0]
+        
+    def _parse_hierarchy(self):
+        f = self._handle # shortcut
+        pf = self.parameter_file # shortcut
+        
+        self.grid_left_edge[:] = f["/bounding box"][:,:,0]
+        self.grid_right_edge[:] = f["/bounding box"][:,:,1]
+        # Move this to the parameter file
+        try:
+            nxb = pf._find_parameter("integer", "nxb", True, f)
+            nyb = pf._find_parameter("integer", "nyb", True, f)
+            nzb = pf._find_parameter("integer", "nzb", True, f)
+        except KeyError:
+            nxb, nyb, nzb = [int(f["/simulation parameters"]['n%sb' % ax])
+                              for ax in 'xyz']
+        self.grid_dimensions[:] *= (nxb, nyb, nzb)
+        # particle count will need to be fixed somehow:
+        #   by getting access to the particle file we can get the number of
+        #   particles in each brick.  but how do we handle accessing the
+        #   particle file?
+
+        # This will become redundant, as _prepare_grid will reset it to its
+        # current value.  Note that FLASH uses 1-based indexing for refinement
+        # levels, but we do not, so we reduce the level by 1.
+        self.grid_levels.flat[:] = f["/refine level"][:][:] - 1
+        g = [self.grid(i+1, self, self.grid_levels[i,0])
+                for i in xrange(self.num_grids)]
+        self.grids = na.array(g, dtype='object')
+
+    def _populate_grid_objects(self):
+        # We only handle 3D data, so offset is 7 (nfaces+1)
+        
+        offset = 7
+        ii = na.argsort(self.grid_levels.flat)
+        gid = self._handle["/gid"][:]
+        for g in self.grids[ii].flat:
+            gi = g.id - g._id_offset
+            # FLASH uses 1-indexed group info
+            g.Children = [self.grids[i - 1] for i in gid[gi,7:] if i > -1]
+            for g1 in g.Children:
+                g1.Parent = g
+            g._prepare_grid()
+            g._setup_dx()
+        self.max_level = self.grid_levels.max()
+
+    def _setup_unknown_fields(self):
+        for field in self.field_list:
+            if field in self.parameter_file.field_info: continue
+            mylog.info("Adding %s to list of fields", field)
+            cf = None
+            if self.parameter_file.has_key(field):
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+            add_field(field, lambda a, b: None,
+                      convert_function=cf, take_log=False)
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+class RAMSESHierarchy(AMRHierarchy):
+
+    grid = RAMSESGrid
+    _handle = None
+    
+    def __init__(self,pf,data_style='ramses'):
+        self.data_style = data_style
+        self.field_info = RAMSESFieldContainer()
+        self.parameter_file = weakref.proxy(pf)
+        # for now, the hierarchy file is the parameter file!
+        self.hierarchy_filename = self.parameter_file.parameter_filename
+        self.directory = os.path.dirname(self.hierarchy_filename)
+        self.tree_proxy = pf.ramses_tree
+
+        self.float_type = na.float64
+        AMRHierarchy.__init__(self,pf,data_style)
+
+    def _initialize_data_storage(self):
+        pass
+
+    def _detect_fields(self):
+        self.field_list = self.tree_proxy.field_names[:]
+    
+    def _setup_classes(self):
+        dd = self._get_data_reader_dict()
+        AMRHierarchy._setup_classes(self, dd)
+        self.object_types.sort()
+
+    def _count_grids(self):
+        # We have to do all the patch-coalescing here.
+        level_info = self.tree_proxy.count_zones()
+        num_ogrids = sum(level_info)
+        ogrid_left_edge = na.zeros((num_ogrids,3), dtype='float64')
+        ogrid_right_edge = na.zeros((num_ogrids,3), dtype='float64')
+        ogrid_levels = na.zeros((num_ogrids,1), dtype='int32')
+        ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
+        ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
+        self.tree_proxy.fill_hierarchy_arrays(
+            ogrid_left_edge, ogrid_right_edge,
+            ogrid_levels, ogrid_file_locations, ochild_masks)
+        # We now have enough information to run the patch coalescing 
+        self.proto_grids = []
+        for level in xrange(len(level_info)):
+            if level_info[level] == 0: continue
+            ggi = (ogrid_levels == level).ravel()
+            left_index = na.rint((ogrid_left_edge[ggi,:]) * (2.0**(level+1))).astype('int64')
+            right_index = left_index + 2
+            dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
+            fl = ogrid_file_locations[ggi,:]
+            # Now our initial protosubgrid
+            initial_left = na.zeros(3, dtype='int64')
+            idims = na.ones(3, dtype='int64') * (2**(level+1))
+            #if level == 6: raise RuntimeError
+            psg = ramses_reader.ProtoSubgrid(initial_left, idims,
+                            left_index, right_index, dims, fl)
+            self.proto_grids.append(self._recursive_patch_splitting(
+                    psg, idims, initial_left, 
+                    left_index, right_index, dims, fl))
+            sums = na.zeros(3, dtype='int64')
+            if len(self.proto_grids[level]) == 1: continue
+            for g in self.proto_grids[level]:
+                sums += [s.sum() for s in g.sigs]
+            assert(na.all(sums == dims.prod(axis=1).sum()))
+        self.num_grids = sum(len(l) for l in self.proto_grids)
+
+    #num_deep = 0
+
+    #@num_deep_inc
+    def _recursive_patch_splitting(self, psg, dims, ind,
+            left_index, right_index, gdims, fl):
+        min_eff = 0.2 # This isn't always respected.
+        if psg.efficiency > min_eff or psg.efficiency < 0.0:
+            return [psg]
+        tt, ax, fp = psg.find_split()
+        if (fp % 2) != 0:
+            if dims[ax] != fp + 1:
+                fp += 1
+            else:
+                fp -= 1
+        #print " " * self.num_deep + "Got ax", ax, "fp", fp
+        dims_l = dims.copy()
+        dims_l[ax] = fp
+        li_l = ind.copy()
+        if na.any(dims_l <= 0): return [psg]
+        L = ramses_reader.ProtoSubgrid(
+                li_l, dims_l, left_index, right_index, gdims, fl)
+        #print " " * self.num_deep + "L", tt, L.efficiency
+        if L.efficiency > 1.0: raise RuntimeError
+        if L.efficiency <= 0.0: L = []
+        elif L.efficiency < min_eff:
+            L = self._recursive_patch_splitting(L, dims_l, li_l,
+                    left_index, right_index, gdims, fl)
+        else:
+            L = [L]
+        dims_r = dims.copy()
+        dims_r[ax] -= fp
+        li_r = ind.copy()
+        li_r[ax] += fp
+        if na.any(dims_r <= 0): return [psg]
+        R = ramses_reader.ProtoSubgrid(
+                li_r, dims_r, left_index, right_index, gdims, fl)
+        #print " " * self.num_deep + "R", tt, R.efficiency
+        if R.efficiency > 1.0: raise RuntimeError
+        if R.efficiency <= 0.0: R = []
+        elif R.efficiency < min_eff:
+            R = self._recursive_patch_splitting(R, dims_r, li_r,
+                    left_index, right_index, gdims, fl)
+        else:
+            R = [R]
+        return L + R
+        
+    def _parse_hierarchy(self):
+        # We have important work to do
+        grids = []
+        gi = 0
+        for level, grid_list in enumerate(self.proto_grids):
+            for g in grid_list:
+                fl = g.grid_file_locations
+                props = g.get_properties()
+                self.grid_left_edge[gi,:] = props[0,:] / (2.0**(level+1))
+                self.grid_right_edge[gi,:] = props[1,:] / (2.0**(level+1))
+                self.grid_dimensions[gi,:] = props[2,:]
+                self.grid_levels[gi,:] = level
+                grids.append(self.grid(gi, self, level, fl, props[0,:]))
+                gi += 1
+        self.grids = na.array(grids, dtype='object')
+
+    def _get_grid_parents(self, grid, LE, RE):
+        mask = na.zeros(self.num_grids, dtype='bool')
+        grids, grid_ind = self.get_box_grids(LE, RE)
+        mask[grid_ind] = True
+        mask = na.logical_and(mask, (self.grid_levels == (grid.Level-1)).flat)
+        return self.grids[mask]
+
+    def _populate_grid_objects(self):
+        for gi,g in enumerate(self.grids):
+            parents = self._get_grid_parents(g,
+                            self.grid_left_edge[gi,:],
+                            self.grid_right_edge[gi,:])
+            if len(parents) > 0:
+                g.Parent.extend(parents.tolist())
+                for p in parents: p.Children.append(g)
+            g._prepare_grid()
+            g._setup_dx()
+        self.max_level = self.grid_levels.max()
+
+    def _setup_unknown_fields(self):
+        for field in self.field_list:
+            if field in self.parameter_file.field_info: continue
+            mylog.info("Adding %s to list of fields", field)
+            cf = None
+            if self.parameter_file.has_key(field):
+                def external_wrapper(f):
+                    def _convert_function(data):
+                        return data.convert(f)
+                    return _convert_function
+                cf = external_wrapper(field)
+            add_field(field, lambda a, b: None,
+                      convert_function=cf, take_log=False)
+
+    def _setup_derived_fields(self):
+        self.derived_field_list = []
+
+    def _setup_data_io(self):
+        self.io = io_registry[self.data_style](self.tree_proxy)
+

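The patch coalescing above keeps splitting a candidate patch until its filling
efficiency (here taken as the fraction of its cells actually covered by octs)
exceeds min_eff = 0.2.  A toy illustration of that acceptance test, separate
from the reader itself (the helper below is hypothetical):

    import numpy as na

    def filling_efficiency(n_covered, dims):
        # fraction of the candidate patch occupied by flagged cells
        return float(n_covered) / na.prod(dims)

    # A 4x4x4 candidate patch containing 16 leaf cells:
    print filling_efficiency(16, (4, 4, 4))   # 0.25 > 0.2, so it would be accepted
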
Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Tue Aug 10 16:02:10 2010
@@ -625,31 +625,74 @@
 class GadgetStaticOutput(StaticOutput):
     _hierarchy_class = GadgetHierarchy
     _fieldinfo_class = GadgetFieldContainer
-    def __init__(self, filename, particles, dimensions = None,
-                 storage_filename = None):
-        StaticOutput.__init__(self, filename, 'gadget_binary')
-        self.storage_filename = storage_filename
-        self.particles = particles
-        if "InitialTime" not in self.parameters:
-            self.parameters["InitialTime"] = 0.0
-        self.parameters["CurrentTimeIdentifier"] = \
-            int(os.stat(self.parameter_filename)[ST_CTIME])
-        if dimensions is None:
-            dimensions = na.ones((3,), dtype='int64') * 32
-        self.parameters['TopGridDimensions'] = dimensions
-        self.parameters['DomainLeftEdge'] = na.zeros(3, dtype='float64')
-        self.parameters['DomainRightEdge'] = na.ones(3, dtype='float64')
-        self.parameters['TopGridRank'] = 3
-        self.parameters['RefineBy'] = 2
+    def __init__(self, h5filename,storage_filename=None) :
+        StaticOutput.__init__(self, h5filename, 'gadget_hdf5')
+        self.storage_filename = storage_filename #Don't know what this is
         self.field_info = self._fieldinfo_class()
-        self.units["Density"] = 1.0
-
+        x = self._get_param('maxlevel')**2
+        self.max_grid_size = (x,x,x)
+        self.parameters["InitialTime"] = 0.0
+        # These should be explicitly obtained from the file, but for now that
+        # will wait until a reorganization of the source tree and better
+        # generalization.
+        self.parameters["TopGridRank"] = 3
+        self.parameters["RefineBy"] = 2
+        
+        
     def _parse_parameter_file(self):
-        pass
-
+        # read the units in from the hdf5 file 
+        #fill in self.units dict
+        #fill in self.time_units dict (keys: 'days','years', '1')
+        
+        #import all of the parameter file params 
+        #this is NOT originally from the gadget snapshot but instead
+        #from the paramfile starting the sim
+        skips = ('TITLE','CLASS','VERSION') #these are just hdf5 crap
+        fh = h5py.File(self.parameter_filename)
+        for kw in fh['root'].attrs.keys():
+            if any([skip in kw for skip in skips]):
+                continue
+            val = fh['root'].attrs[kw]
+            if type(val)==type(''):
+                try:    val = cPickle.loads(val)
+                except: pass
+            #also, includes unit info
+            setattr(self,kw,val)
+            
+    def _get_param(self,kw,location='/root'):
+        fh = h5py.File(self.parameter_filename)
+        val = fh[location].attrs[kw]
+        if type(val)==type(''):
+            try:    val = cPickle.loads(val)
+            except: pass
+        return val
+            
     def _set_units(self):
-        self.units = {'1':1.0, 'unitary':1.0, 'cm':1.0}
+        #check out the unit params from _parse_parameter_file and use them
+        #code below is all filler
+        self.units = {}
         self.time_units = {}
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0
+        seconds = 1 #self["Time"]
+        self.time_units['years'] = seconds / (365*3600*24.0)
+        self.time_units['days']  = seconds / (3600*24.0)
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+        
+        
+    def _is_valid(self):
+        # check for a /root to exist in the h5 file
+        try:
+            h5f=h5py.File(self.parameter_filename)
+            valid = 'root' in h5f.keys()
+            h5f.close()
+            return valid
+        except:
+            pass
+        return False
 
 class ChomboStaticOutput(StaticOutput):
     _hierarchy_class = ChomboHierarchy
@@ -790,3 +833,156 @@
     @classmethod
     def _is_valid(self, *args, **kwargs):
         return os.path.exists(args[0] + "rhob")
+
+class FLASHStaticOutput(StaticOutput):
+    _hierarchy_class = FLASHHierarchy
+    _fieldinfo_class = FLASHFieldContainer
+    _handle = None
+    
+    def __init__(self, filename, data_style='flash_hdf5',
+                 storage_filename = None):
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+        self.field_info = self._fieldinfo_class()
+        # hardcoded for now
+        self.parameters["InitialTime"] = 0.0
+        # These should be explicitly obtained from the file, but for now that
+        # will wait until a reorganization of the source tree and better
+        # generalization.
+        self.parameters["TopGridRank"] = 3
+        self.parameters["RefineBy"] = 2
+        self.parameters["HydroMethod"] = 'flash' # always PPM DE
+        self.parameters["Time"] = 1. # default unit is 1...
+        
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        seconds = 1 #self["Time"]
+        self.time_units['years'] = seconds / (365*3600*24.0)
+        self.time_units['days']  = seconds / (3600*24.0)
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _find_parameter(self, ptype, pname, scalar = False, handle = None):
+        # We're going to implement handle caching eventually
+        if handle is None: handle = self._handle
+        if handle is None:
+            handle = h5py.File(self.parameter_filename, "r")
+        nn = "/%s %s" % (ptype,
+                {False: "runtime parameters", True: "scalars"}[scalar])
+        for tpname, pval in handle[nn][:]:
+            if tpname.strip() == pname:
+                return pval
+        raise KeyError(pname)
+
+    def _parse_parameter_file(self):
+        self.parameters["CurrentTimeIdentifier"] = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        self._handle = h5py.File(self.parameter_filename, "r")
+        self.parameters["DomainLeftEdge"] = na.array(
+            [self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
+        self.parameters["DomainRightEdge"] = na.array(
+            [self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
+        self._handle.close()
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        try:
+            fileh = h5py.File(args[0],'r')
+            if "bounding box" in fileh["/"].keys():
+                return True
+        except:
+            pass
+        return False
+
+class RAMSESStaticOutput(StaticOutput):
+    _hierarchy_class = RAMSESHierarchy
+    _fieldinfo_class = RAMSESFieldContainer
+    _handle = None
+    
+    def __init__(self, filename, data_style='ramses',
+                 storage_filename = None):
+        StaticOutput.__init__(self, filename, data_style)
+        self.storage_filename = storage_filename
+
+        self.field_info = self._fieldinfo_class()
+        # hardcoded for now
+        self.parameters["InitialTime"] = 0.0
+        # These should be explicitly obtained from the file, but for now that
+        # will wait until a reorganization of the source tree and better
+        # generalization.
+        self.parameters["TopGridRank"] = 3
+        self.parameters["RefineBy"] = 2
+        self.parameters["HydroMethod"] = 'ramses'
+        self.parameters["Time"] = 1. # default unit is 1...
+
+    def __repr__(self):
+        return self.basename.rsplit(".", 1)[0]
+        
+    def _set_units(self):
+        """
+        Generates the conversion to various physical units based on the parameter file.
+        """
+        self.units = {}
+        self.time_units = {}
+        if len(self.parameters) == 0:
+            self._parse_parameter_file()
+        self._setup_nounits_units()
+        self.conversion_factors = defaultdict(lambda: 1.0)
+        self.time_units['1'] = 1
+        self.units['1'] = 1.0
+        self.units['unitary'] = 1.0 / (self["DomainRightEdge"] - self["DomainLeftEdge"]).max()
+        seconds = 1 #self["Time"]
+        self.time_units['years'] = seconds / (365*3600*24.0)
+        self.time_units['days']  = seconds / (3600*24.0)
+        for key in yt2orionFieldsDict:
+            self.conversion_factors[key] = 1.0
+
+    def _setup_nounits_units(self):
+        z = 0
+        mylog.warning("Setting 1.0 in code units to be 1.0 cm")
+        if not self.has_key("TimeUnits"):
+            mylog.warning("No time units.  Setting 1.0 = 1 second.")
+            self.conversion_factors["Time"] = 1.0
+        for unit in mpc_conversion.keys():
+            self.units[unit] = mpc_conversion[unit] / mpc_conversion["cm"]
+
+    def _parse_parameter_file(self):
+        self.parameters["CurrentTimeIdentifier"] = \
+            int(os.stat(self.parameter_filename)[ST_CTIME])
+        import yt.ramses_reader as rr
+        self.ramses_tree = rr.RAMSES_tree_proxy(self.parameter_filename)
+        rheader = self.ramses_tree.get_file_info()
+        self.parameters.update(rheader)
+        self.parameters["DomainRightEdge"] = na.ones(3, dtype='float64') \
+                                           * rheader['boxlen']
+        self.parameters["DomainLeftEdge"] = na.zeros(3, dtype='float64')
+        self.parameters["TopGridDimensions"] = na.zeros(3, dtype='int64') + 2
+
+    @classmethod
+    def _is_valid(self, *args, **kwargs):
+        if not os.path.basename(args[0]).startswith("info_"): return False
+        fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
+        print fn
+        return os.path.exists(fn)
+

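A sketch of querying FLASH parameters through the new _find_parameter helper
(the file name is hypothetical; an explicit handle is passed rather than
relying on a cached one):

    import h5py

    pf = FLASHStaticOutput("flash_hdf5_chk_0000")
    f = h5py.File(pf.parameter_filename, "r")
    xmin = pf._find_parameter("real", "xmin", handle=f)
    nxb  = pf._find_parameter("integer", "nxb", scalar=True, handle=f)
    f.close()
    print xmin, nxb, pf["DomainLeftEdge"]
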
Added: trunk/yt/lagos/RAMSESFields.py
==============================================================================
--- (empty file)
+++ trunk/yt/lagos/RAMSESFields.py	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,52 @@
+"""
+RAMSES-specific fields
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from UniversalFields import *
+class RAMSESFieldContainer(CodeFieldInfoContainer):
+    _shared_state = {}
+    _field_list = {}
+RAMSESFieldInfo = RAMSESFieldContainer()
+add_ramses_field = RAMSESFieldInfo.add_field
+
+add_field = add_ramses_field
+
+translation_dict = {"Density":"density",
+                    "x-velocity":"velocity_x",
+                    "y-velocity":"velocity_y",
+                    "z-velocity":"velocity_z",
+                    "Pressure":"pressure",
+                    "Metallicity":"metallicity",
+                   }
+
+def _generate_translation(mine, theirs):
+    add_field(theirs, function=lambda a, b: b[mine], take_log=True)
+
+for f,v in translation_dict.items():
+    if v not in RAMSESFieldInfo:
+        add_field(v, function=lambda a,b: None, take_log=False,
+                  validators = [ValidateDataField(v)])
+    #print "Setting up translator from %s to %s" % (v, f)
+    _generate_translation(v, f)
+

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Tue Aug 10 16:02:10 2010
@@ -76,6 +76,8 @@
 from EnzoFields import *
 from OrionFields import *
 from ChomboFields import *
+from FLASHFields import *
+from RAMSESFields import *
 fieldInfo = EnzoFieldInfo
 
 # NOT the same as fieldInfo.add_field

Modified: trunk/yt/mods.py
==============================================================================
--- trunk/yt/mods.py	(original)
+++ trunk/yt/mods.py	Tue Aug 10 16:02:10 2010
@@ -39,14 +39,16 @@
 from performance_counters import yt_counters, time_function
 
 # Now individual component imports from lagos
-from yt.lagos import EnzoStaticOutput, \
+from yt.lagos import \
+    EnzoStaticOutput, OrionStaticOutput, TigerStaticOutput, \
+    FLASHStaticOutput, \
     BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
     derived_field, \
     add_field, FieldInfo, EnzoFieldInfo, Enzo2DFieldInfo, OrionFieldInfo, \
-    GadgetFieldInfo, TigerFieldInfo, \
+    GadgetFieldInfo, TigerFieldInfo, ChomboFieldInfo, FLASHFieldInfo, \
     Clump, write_clump_hierarchy, find_clumps, write_clumps, \
     get_lowest_clumps, \
-    OrionStaticOutput, HaloFinder, HOPHaloFinder, FOFHaloFinder, parallelHF, \
+    HaloFinder, HOPHaloFinder, FOFHaloFinder, parallelHF, \
     axis_names, x_dict, y_dict, TwoPointFunctions, FcnSet
 
 # This is a temporary solution -- in the future, we will allow the user to

Added: trunk/yt/ramses_headers/FortranUnformatted_IO.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/FortranUnformatted_IO.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,291 @@
+/*
+	FortranUnformatted_IO.hh
+	This file contains a C++ class for access to FORTRAN unformatted files
+			
+	Copyright (C) 2008  Oliver Hahn, ojha at gmx.de
+
+    It is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __FORTRAN_UNFORMATTED_HH
+#define __FORTRAN_UNFORMATTED_HH
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <stdexcept>
+#include <iterator>
+
+#define DEFAULT_ADDLEN 4
+
+//! A class to perform IO on FORTRAN unformatted files
+/*! FortranUnformatted provides sequential read access to FORTRAN
+    unformatted files.
+ */
+class FortranUnformatted{
+	
+protected:
+	std::string   m_filename;		//!< the file name 
+	std::fstream  m_ifs;			//!< STL fstream object
+	int           m_addlen;			//!< number of bytes in pre-/suffix data of FORTRAN unformatted data
+		
+public:
+	
+	//! constructor for FortranUnformatted
+	/*! simple constructor for FortranUnformatted
+	 * @param filename the name of the FORTRAN unformatted file to be opened for IO
+	 * @param mode a combination of std::ios_base::openmode flags determining the mode in which the file is opened
+	 * @param addlen the number of bytes which are pre- & postpended to unformatted arrays giving their size (default=4)
+	 */
+	explicit FortranUnformatted( std::string filename, std::ios_base::openmode mode = std::ios_base::in, int addlen=DEFAULT_ADDLEN )
+		: m_filename( filename ), 
+			m_ifs( m_filename.c_str(), mode|std::ios::binary ), 
+			m_addlen( addlen )
+	{ 
+		if(!m_ifs.good() || !m_ifs.is_open() )
+			throw std::runtime_error("FortranUnformatted : unable to open file \'"
+						+m_filename+"\'for read access");
+		m_ifs.exceptions ( std::fstream::eofbit | std::fstream::failbit 
+						| std::fstream::badbit );
+	}
+	
+	//! read data from FORTRAN unformatted file
+	/*! read a scalar from FORTRAN unformatted file. If the data is not scalar 
+		(i.e. an array) or the function is called with a template parameter of 
+		a different size than the stored data, a std::runtime_error is thrown.
+	 \param r reference to the return value
+	 */
+	template< typename T > void read( T& r )
+	{
+		unsigned n1,n2;
+		
+		try{
+			m_ifs.read( (char*)&n1, m_addlen );
+			m_ifs.read( (char*)&r, sizeof(T) );
+			m_ifs.read( (char*)&n2, m_addlen );
+		}catch(...){
+			throw;
+		}
+
+		if( n1 != sizeof(T) || n1 != n2 ){
+			throw std::runtime_error("FortranUnformatted::read : invalid type"\
+						" conversion when reading FORTRAN unformatted.");
+		}
+	}
+	
+	//! write a single variable, arbitrary type to the Fortran unformatted file
+	/*!
+	 * @param w the variable, arbitrary type
+	 */
+	template< typename T > void write( T& w )
+	{
+		unsigned n1 = sizeof(T);
+		
+		try{
+			m_ifs.write( (char*)&n1, m_addlen );
+			m_ifs.write( (char*)&w, n1 );
+			m_ifs.write( (char*)&n1, m_addlen );
+		}catch(...){
+			throw;
+		}
+	}
+	
+	
+	//! write a range of a container data object given by iterators
+	/*! 
+	 * @param begin iterator pointing to the beginning of the range
+	 * @param end iterator pointing to the end of the range
+	 */
+	template< typename _InputIterator >
+	void write( _InputIterator begin, _InputIterator end )
+	{
+		_InputIterator it(begin);
+		unsigned nelem = std::distance(begin,end);
+		unsigned sz = sizeof(*begin);
+		unsigned totsz = sz*nelem;
+		
+		try{
+			m_ifs.write( (char*)&totsz, m_addlen );
+			while( it!=end ){
+				m_ifs.write( (char*)&(*it), sz );
+				++it;
+			}
+			m_ifs.write( (char*)&totsz, m_addlen );
+		}catch(...){
+			throw;
+		}
+	}
+	
+	//! read masked data
+	/*! this function reads data but discards all elements for which the mask
+	 *  iterator does not dereference to 'true'. The mask iterator must be
+	 *  incrementable at least as many times as there are elements to be read.
+	 * @param mask an iterator which is incremented for each element read from the file and determines whether that element is kept
+	 * @param data an output iterator to which the data is written
+	 */
+	template< typename basetype, typename _InputIterator, typename _OutputIterator >
+	_InputIterator read( _InputIterator mask, _OutputIterator data )
+	{
+		_InputIterator oldmask = mask;
+		std::vector<basetype> temp;
+		typename std::vector<basetype>::iterator temp_it;
+		
+		unsigned n1,n2;
+		try{
+			m_ifs.read( (char*)&n1, m_addlen );
+			temp.resize(n1/sizeof(basetype));
+			m_ifs.read( (char*)&temp[0], n1 );
+			
+			for( temp_it = temp.begin(); temp_it!=temp.end(); ++temp_it ){
+				//... copy data if masked - this also performs a type conversion if needed ...//
+				if( *mask )
+					*data = *temp_it;
+				oldmask = mask++;
+				if( mask == oldmask ) break;
+			}
+			//std::copy(temp.begin(),temp.end(), data);
+			
+			m_ifs.read( (char*)&n2, m_addlen );
+		
+		}catch(...){
+			throw std::runtime_error("FortranUnformatted::read : error reading FORTRAN unformatted.");
+		}
+		if( n1!=n2 )
+			throw std::runtime_error("FortranUnformatted::read : invalid type conversion when"\
+						" reading FORTRAN unformatted.");
+		
+		return mask;
+	}
+	
+	
+	//! read data
+	/*! this function reads data from a Fortran unformatted file and writes it to
+	 *  an output iterator.
+	 * @param data an output iterator to which the data is written
+	 */
+	template< typename basetype, typename _OutputIterator >
+	_OutputIterator read( _OutputIterator data )
+	{
+		std::vector<basetype> temp;
+		typename std::vector<basetype>::iterator temp_it;
+		
+		unsigned n1,n2;
+		try{
+			m_ifs.read( (char*)&n1, m_addlen );
+			temp.resize(n1/sizeof(basetype));
+			m_ifs.read( (char*)&temp[0], n1 );
+			//... copy data - this also performs a type conversion if needed ...//
+			std::copy(temp.begin(),temp.end(), data);
+			m_ifs.read( (char*)&n2, m_addlen );
+		
+		}catch(...){
+			throw std::runtime_error("FortranUnformatted::read : error reading FORTRAN unformatted.");
+		}
+		if( n1!=n2 )
+			throw std::runtime_error("FortranUnformatted::read : invalid type conversion when"\
+						" reading FORTRAN unformatted.");
+		
+		
+		return data;
+	}
+	
+	//! check if beyond end-of-file
+	bool eof( void )
+	{
+		bool bcheck;
+		try{
+			bcheck = (m_ifs.peek()==EOF);
+		}catch(...){
+			m_ifs.clear();
+			return true;
+		}
+		return bcheck;
+	}
+		
+	//! skip ahead in FORTRAN unformatted file
+	/*! skips n datasets ahead in FORTRAN unformatted file. Equivalent to 
+	 *	n READ(X) without argument in FORTRAN
+	 *  @param n number of entries to skip ahead (scalar or arrays)
+	 */
+	void skip_n( unsigned n )
+	{
+		unsigned ndone = 0;
+		while( ndone < n ){
+			int n1, n2;
+			try{
+				m_ifs.read( (char*)&n1, m_addlen );
+				m_ifs.seekg( n1, std::ios::cur );
+				m_ifs.read( (char*)&n2, m_addlen );
+			}catch(...){
+				throw std::runtime_error("FortranUnformatted::skip_n : error seeking in FORTRAN unformatted file.");
+			}
+			++ndone;
+		}
+	}
+	
+	//! just a std::streampos
+	typedef std::streampos streampos;
+	
+	//! wrapper for the std::ifstream::tellg() function
+	streampos tellg( void )
+	{ return m_ifs.tellg(); }
+	
+	//! wrapper for the std::ifstream::seekg() function
+	void seekg( streampos pos )
+	{ m_ifs.seekg( pos ); /*, std::ios::beg );*/ }
+	
+	
+	//! jump to dataset in FORTRAN unformatted files
+	/*! moves past the nth WRITE(X) operation from the beginning in a FORTRAN 
+	 *	unformatted file 
+	 * @param n number of entries to skip (scalar or arrays)
+	 * @sa skip_n()
+	 */
+	void skip_n_from_start( unsigned n )
+	{
+		m_ifs.seekg(0,std::ios::beg);
+		skip_n( n );
+	}
+	
+	//! skip ahead in FORTRAN unformatted file
+	/*! skips to next dataset in FORTRAN unformatted file. Equivalent to a 
+	 * READ(X) without argument in FORTRAN
+	 * @sa skip_n
+	 */
+	void skip( void )
+	{
+		skip_n(1);
+	}
+
+	//! rewind file
+	/*! rewinds the file to the beginning 
+	 */
+	void rewind( void )
+	{
+		m_ifs.seekg(0,std::ios::beg);
+	}
+
+	
+	
+	/*template< typename T >
+	friend FortranUnformatted& operator>> (FortranUnformatted &is, T& data);
+
+	template< typename T >
+	friend FortranUnformatted& operator>> (FortranUnformatted &is, std::vector<T>& data);*/
+};
+
+
+
+#endif //__FORTRAN_UNFORMATTED_HH
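For orientation, a minimal usage sketch for the class above: it assumes a hypothetical file header.bin whose first record is a single 4-byte integer and whose second record is an array of doubles (the file name and record layout are made up for illustration, not taken from the RAMSES format).

#include <exception>
#include <iostream>
#include <iterator>
#include <stdexcept>
#include <vector>

#include "FortranUnformatted_IO.hh"

int main( void )
{
	try{
		FortranUnformatted ff( "header.bin" );   // hypothetical input file

		int n;
		ff.read( n );                            // scalar read: record length is checked against sizeof(int)

		std::vector<double> values;
		ff.read<double>( std::back_inserter(values) );  // array read: copies one whole record

		std::cout << "n = " << n << ", read " << values.size() << " doubles" << std::endl;
	}catch( std::exception& e ){
		std::cerr << e.what() << std::endl;
		return 1;
	}
	return 0;
}

The scalar overload verifies that the record's length prefix and suffix both equal sizeof(T); the iterator overload reads the whole record into a temporary buffer of the declared base type and copies it out, converting element-wise if the output container holds a different type.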

Added: trunk/yt/ramses_headers/RAMSES_amr_data.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_amr_data.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,970 @@
+/*
+		This file is part of libRAMSES++ 
+			a C++ library to access snapshot files 
+			generated by the simulation code RAMSES by R. Teyssier
+		
+    Copyright (C) 2008-09  Oliver Hahn, ojha at gmx.de
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RAMSES_AMR_DATA_HH
+#define __RAMSES_AMR_DATA_HH
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <map>
+#include <cmath>
+#include <iterator>
+
+#include "FortranUnformatted_IO.hh"
+#include "RAMSES_info.hh"
+#include "RAMSES_amr_data.hh"
+
+#ifndef FIX
+#define FIX(x)	((int)((x)+0.5))
+#endif
+
+#define ACC_NL(cpu,lvl)   ((cpu)+m_header.ncpu*(lvl))
+#define ACC_NLIT(cpu,lvl) ((cpu)+m_plevel->m_header.ncpu*(lvl))
+#define ACC_NB(cpu,lvl)   ((cpu)+m_header.nboundary*(lvl))
+
+#define LENGTH_POINTERLISTS 4096
+#define ENDPOINT ((unsigned)(-1))
+
+namespace RAMSES{
+namespace AMR{
+	
+/**************************************************************************************\
+ *** auxiliary datatypes **************************************************************
+\**************************************************************************************/
+	
+template< typename real_t>
+struct vec{
+	real_t x,y,z;
+		
+	vec( real_t x_, real_t y_, real_t z_ )
+	: x(x_),y(y_),z(z_)
+	{ }
+		
+	vec( const vec& v )
+	: x(v.x),y(v.y),z(v.z)
+	{ }
+		
+	vec( void )
+	: x(0.0), y(0.0), z(0.0)
+	{ }
+};
+
+/**************************************************************************************\
+ *** AMR cell base types **************************************************************
+\**************************************************************************************/
+
+template <typename id_t=unsigned, typename real_t=float>
+class cell_locally_essential{
+public:
+	id_t m_neighbour[6];
+	id_t m_father;
+	id_t m_son[8];
+	real_t m_xg[3];
+	id_t m_cpu;
+
+	char m_pos;
+	
+	cell_locally_essential(){}
+		
+	bool is_refined( int ison ) const
+	{	return ((int)m_son[ison]!=-1); }	
+};
+
+template <typename id_t=unsigned, typename real_t=float>
+class cell_simple{
+public:
+	id_t m_son[1];
+	real_t m_xg[3];
+	id_t m_cpu;
+
+	char m_pos;
+	
+	cell_simple(){}
+		
+	bool is_refined( int ison=0 ) const
+	{	return ((int)m_son[0]!=-1); }	
+};
+
+
+//.... some type traits that are used to distinguish what data needs to be read ....//
+template<class X> struct is_locally_essential
+{ enum { check=false }; };
+
+template<> struct is_locally_essential<cell_locally_essential<> > 
+{ enum { check=true }; };
+
+template<> struct is_locally_essential<cell_simple<> > 
+{ enum { check=false }; };
+
+
+/**************************************************************************************\
+ *** AMR level class definition, subsumes a collection of AMR cells *******************
+\**************************************************************************************/
+
+//! AMR level implementation
+template< typename Cell_ >
+class level{
+public:
+	unsigned m_ilevel;
+	std::vector< Cell_ > m_level_cells;
+	
+	double m_xc[8];			//!< relative x-offsets of the 8 children for current refinement level
+	double m_yc[8];			//!< relative y-offsets of the 8 children for current refinement level
+	double m_zc[8];			//!< relative z-offsets of the 8 children for current refinement level
+	
+	typedef typename std::vector< Cell_ >::iterator iterator;
+	typedef typename std::vector< Cell_ >::const_iterator const_iterator;
+
+	double 		m_dx;
+	unsigned 	m_nx;
+
+	level( unsigned ilevel )
+	: m_ilevel( ilevel )
+	{
+		m_dx = pow(0.5,ilevel+1);
+		m_nx = (unsigned)(1.0/m_dx);
+		
+		for( unsigned k=1; k<=8; k++ )
+		{
+			//... initialize positions of son cell centres
+			//... relative to parent cell centre
+			unsigned 
+				iz=(k-1)/4,
+				iy=(k-1-4*iz)/2,
+				ix=(k-1-2*iy-4*iz);
+			m_xc[k-1]=((double)ix-0.5f)*m_dx;
+			m_yc[k-1]=((double)iy-0.5f)*m_dx;
+			m_zc[k-1]=((double)iz-0.5f)*m_dx;
+		}
+	}
+	
+	void register_cell( const Cell_ & cell )
+	{ m_level_cells.push_back( cell ); }
+	
+	const_iterator begin( void ) const{ return m_level_cells.begin(); }
+	iterator begin( void ){ return m_level_cells.begin(); }
+	
+	const_iterator end( void ) const{ return m_level_cells.end(); }
+	iterator end( void ){ return m_level_cells.end(); }
+	
+	Cell_& operator[]( unsigned i )
+	{ return m_level_cells[i]; }
+	
+	unsigned size( void ) { return m_level_cells.size(); }
+};
+
+
+/**************************************************************************************\
+ *** constants ************************************************************************
+\**************************************************************************************/
+
+//! neighbour cell access pattern
+const static int nbor_cell_map[6][8] = 
+{ 
+	{1,0,3,2,5,4,7,6},
+	{1,0,3,2,5,4,7,6},
+	{2,3,0,1,6,7,4,5},
+	{2,3,0,1,6,7,4,5},
+	{4,5,6,7,0,1,2,3},
+	{4,5,6,7,0,1,2,3} 
+};
+
+
+/**************************************************************************************\
+ *** AMR tree class implements the hierarchy of AMR levels with links *****************
+\**************************************************************************************/
+
+/*!
+ * @class tree
+ * @brief encapsulates the hierarchical AMR structure data from a RAMSES simulation snapshot
+ *
+ * This class provides low-level read access to RAMSES amr_XXXXX.out files. 
+ * Data from a given list of computational domains can be read and is
+ * stored in internal datastructures.
+ * Access to hydrodynamical variables stored in the cells is provided 
+ * through member functions of class RAMSES_hydro_level and iterators 
+ * provided by this class
+ * @sa RAMSES_hydro_level
+ */
+template< class Cell_, class Level_ >
+class tree
+{
+
+public:
+	
+	//! header amr meta-data structure, for details see also RAMSES source code (file amr/init_amr.f90)
+	struct header{ 
+		std::vector< int > nx;			//!< base mesh resolution [3-vector]
+		std::vector< int > nout;		//!< 3 element vector: [noutput2, iout2, ifout2]
+		std::vector< int > nsteps;		//!< 2 element vector: [nstep, nstep_coarse]
+		int ncpu;						//!< number of CPUs (=computational domains) in the simulation
+		int ndim;						//!< number of spatial dimensions
+		int nlevelmax;					//!< maximum refinement level allowed
+		int ngridmax;					//!< maximum number of grid cells stored per CPU
+		int nboundary;					//!< number of boundary cells (=ghost cells)
+		int ngrid_current;				//!< currently active grid cells
+		double boxlen;					//!< length of the simulation box
+		std::vector<double> tout;		//!< output times (1..noutput)
+		std::vector<double> aout;		//!< output times given as expansion factors (1..noutput)
+		std::vector<double> dtold;		//!< old time steps (1..nlevelmax)
+		std::vector<double> dtnew;		//!< next time steps (1..nlevelmax)
+		std::vector<double> stat;		//!< some diagnostic snapshot meta data: [const, mass_tot0, rho_tot]
+		std::vector<double> cosm;		//!< cosmological meta data: [omega_m, omega_l, omega_k, omega_b, h0, aexp_ini, boxlen_ini]
+		std::vector<double> timing;		//!< timing information: [aexp, hexp, aexp_old, epot_tot_int, epot_tot_old]
+		double t;						//!< time stamp of snapshot
+		double mass_sph;				//!< mass threshold used to flag for cell refinement
+	};
+	
+	std::vector< Level_ > m_AMR_levels;	//!< STL vector holding the AMR level hierarchy
+	
+	std::vector<unsigned> 
+		m_headl,						//!< head indices, point to first active cell
+		m_numbl, 						//!< number of active cells
+		m_taill;						//!< tail indices, point to last active cell
+
+	int m_cpu;							//!< index of computational domain being accessed
+	int m_minlevel;						//!< lowest refinement level to be loaded
+	int m_maxlevel;						//!< highest refinement level to be loaded
+	std::string m_fname;				//!< the snapshot filename amr_XXXXX.out
+	unsigned m_ncoarse;					//!< number of coarse grids
+	struct header m_header;				//!< the header meta data
+	
+	
+protected:
+	
+  //! prototypical grid iterator
+  /*! iterates through cells on one level, provides access to neighbours, parents, children
+   */
+	template< typename TreePointer_, typename Index_=unsigned >
+	class proto_iterator{
+		public:
+		
+			friend class tree;
+			
+			typedef Index_ value_type;
+			typedef Index_& reference;
+			typedef Index_* pointer;
+			
+		protected:
+		
+			Index_
+				m_ilevel,		//!< refinement level on which we iterate
+				m_icpu;			//!< domain on which we iterate
+				
+			typedef typename std::vector< Cell_ >::const_iterator Iterator_;
+			Iterator_	 m_cell_it; //!< basis iterator that steps through cells on one level
+			TreePointer_ m_ptree; //!< pointer to associated tree object
+			
+			
+			//! low-level constructor that should only be used from within AMR_tree
+			proto_iterator( unsigned level_, unsigned cpu_, Iterator_ it_, TreePointer_ ptree_ )
+			: m_ilevel(level_), m_icpu(cpu_), m_cell_it(it_), m_ptree( ptree_ )
+			{ }
+			
+		public:
+		
+			//! this is either the copy-constructor or a constructor for implicit type conversion
+			template< typename X >
+			proto_iterator( proto_iterator<X> &it )
+			: m_ilevel( it.m_ilevel ), m_icpu( it.m_icpu ), m_cell_it( it.m_cell_it ), m_ptree( it.m_ptree )
+			{ }
+	
+			//! empty constructor, doesn't initialize anything
+			proto_iterator()
+			: m_ilevel(0), m_icpu(0), m_ptree(NULL)
+			{ }
+			
+			//! test for equality between two proto_iterator instantiations
+			template< typename X >
+			inline bool operator==( const proto_iterator<X>& x ) const
+			{ return m_cell_it==x.m_cell_it; }
+
+			//! test for inequality between two proto_iterator instantiations	
+			template< typename X >
+			inline bool operator!=( const proto_iterator<X>& x ) const
+			{ return m_cell_it!=x.m_cell_it; }
+
+			//! iterate forward, prefix
+			inline proto_iterator& operator++()
+			{ ++m_cell_it; return *this; }
+			
+			//! iterate forward, postfix
+			inline proto_iterator operator++(int)
+			{ proto_iterator<TreePointer_> it(*this); operator++(); return it; }
+
+            inline void next(void) { operator++(); }
+			
+			//! iterate backward, prefix
+			inline proto_iterator& operator--()
+			{ --m_cell_it; return *this; }
+			
+			//! iterate backward, postfix
+			inline proto_iterator operator--(int)
+			{ proto_iterator<TreePointer_> it(*this); operator--(); return it; }
+			
+			//! iterate several forward
+			inline proto_iterator operator+=(int i)
+			{ proto_iterator<TreePointer_> it(*this); m_cell_it+=i; return it; }
+			
+			//! iterate several backward
+			inline proto_iterator operator-=(int i)
+			{ proto_iterator<TreePointer_> it(*this); m_cell_it-=i; return it; }
+			
+			//! assign two proto_iterators; this will fail if no typecast between X and TreePointer_ exists
+			template< typename X >
+			inline proto_iterator& operator=(const proto_iterator<X>& x)
+			{ m_cell_it = x.m_cell_it; m_ilevel = x.m_ilevel; m_icpu = x.m_icpu; m_ptree = x.m_ptree; return *this; }
+			
+			//! iterator dereferencing, returns an array index
+			inline Cell_ operator*() const
+			{ return *m_cell_it; }
+			
+            inline Index_ get_cell_father() const { return (*m_cell_it).m_father; }
+            inline bool is_finest(int ison) { return !((*m_cell_it).is_refined(ison)); }
+			
+			//! move iterator to a child grid
+			/*!
+			 * @param  ind index of the child grid in the parent oct (0..7)
+			 * @return iterator pointing to child grid if it exists, otherwise 'end' of current level
+			 */
+			//template< typename X >
+			inline proto_iterator& to_child( int ind )
+			{
+				if( !(*m_cell_it).is_refined(ind) )
+					 return (*this = m_ptree->end( m_ilevel ));
+				++m_ilevel;
+				m_cell_it = m_ptree->m_AMR_levels[m_ilevel].begin()+(*m_cell_it).m_son[ind];
+				return *this;
+			}
+			
+			
+			//! get an iterator to a child grid
+			/*!
+			 * @param  ind index of the child grid in the parent oct (0..7)
+			 * @return iterator pointing to child grid if it exists, otherwise 'end' of current level
+			 */
+			//template< typename X >
+			inline proto_iterator get_child( int ind ) const
+			{
+				proto_iterator it(*this);
+				it.to_child( ind );
+				return it;
+			}
+			
+			inline char get_child_from_pos( double x, double y, double z ) const
+			{
+				bool  bx,by,bz;
+				bx = x > (*m_cell_it).m_xg[0];
+				by = y > (*m_cell_it).m_xg[1];
+				bz = z > (*m_cell_it).m_xg[2];
+				
+				//std::cerr << "(" << bx << ", " << by << ", " << bz << ")\n";
+				
+				return (char)bx+2*((char)by+2*(char)bz);
+			}
+			
+			
+			//! move iterator to the parent grid
+			/*! 
+			 * @return iterator pointing to the parent grid if it exists, 'end' of the current level otherwise
+			 */
+			
+			inline proto_iterator& to_parent( void )
+			{
+				if( m_ilevel==0 )
+					return (*this = m_ptree->end( m_ilevel ));
+				--m_ilevel;
+				m_cell_it = m_ptree->m_AMR_levels[m_ilevel].begin()+(*m_cell_it).m_father;
+				return *this;
+			}
+			
+			//! query whether a given child cell is refined
+			inline bool is_refined( int i ) const
+			{
+				return (*m_cell_it).is_refined(i);
+			}
+			
+			//! get an iterator to the parent grid
+			/*! 
+			 * @return iterator pointing to the parent grid if it exists, 'end' of the current level otherwise
+			 */
+			inline proto_iterator get_parent( void ) const
+			{
+				proto_iterator it(*this);
+				it.to_parent();
+				return it;
+			}
+			
+			//! move iterator to spatial neighbour grid
+			/*!
+			 * @param ind index of neighbour (0..5)
+			 * @return iterator pointing to neighbour grid if it exists, otherwise 'end' of current level
+			 */
+			inline proto_iterator& to_neighbour( int ind )
+			{
+				unsigned icell = nbor_cell_map[ind][(int)(*m_cell_it).m_pos];
+				m_cell_it = m_ptree->m_AMR_levels[m_ilevel-1].begin()+(*m_cell_it).m_neighbour[ind];
+				
+				if( !(*m_cell_it).is_refined(icell) )
+					return (*this = m_ptree->end(m_ilevel));
+				
+				m_cell_it = m_ptree->m_AMR_levels[m_ilevel].begin()+(*m_cell_it).m_son[icell];
+				return *this;
+			}
+			
+			//! get an iterator to spatial neighbour grid
+			/*!
+			 * @param ind index of neighbour (0..5)
+			 * @return iterator pointing to neighbour grid if it exists, otherwise 'end' of current level
+			 */
+			inline proto_iterator get_neighbour( int ind )
+			{
+				proto_iterator it(*this);
+				it.to_neighbour(ind);
+				return it;
+			}
+			
+			inline Index_ get_level( void ) const
+			{ return m_ilevel; }
+			
+			inline int get_domain( void ) const
+			{ return (*m_cell_it).m_cpu; }
+			
+			inline int get_absolute_position( void ) const 
+			{
+				return (unsigned)(std::distance<Iterator_>(m_ptree->m_AMR_levels[m_ilevel].begin(),m_cell_it));
+			}
+			
+			
+	};
+	
+public:
+	
+	typedef proto_iterator<const tree*> const_iterator;
+	typedef proto_iterator<tree*>       iterator;
+	
+
+
+protected:
+	
+	//! read header meta data from amr snapshot file
+	void read_header( void );
+	
+
+	
+	//! generate the amr_XXXXX.out filename for a given computational domain
+	/*! @param icpu index of computational domain (base 1)
+	 */
+	std::string gen_fname( int icpu );
+	
+	//! generate the amr_XXXXX.out filename from the path to the info_XXXXX.out file
+	std::string rename_info2amr( const std::string& info );
+
+
+	#define R_SQR(x) ((x)*(x))
+	
+	template< typename Real_ >
+	inline bool ball_intersection( const vec<Real_>& xg, double dx2, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dmin = 0, bmin, bmax;
+		
+		//.. x ..//
+		bmin = xg.x-dx2;
+		bmax = xg.x+dx2;
+		if( xc.x < bmin ) dmin += R_SQR(xc.x - bmin ); else
+		if( xc.x > bmax ) dmin += R_SQR(xc.x - bmax );
+			
+		//.. y ..//
+		bmin = xg.y-dx2;
+		bmax = xg.y+dx2;
+		if( xc.y < bmin ) dmin += R_SQR(xc.y - bmin ); else
+		if( xc.y > bmax ) dmin += R_SQR(xc.y - bmax );
+		
+		//.. z ..//
+		bmin = xg.z-dx2;
+		bmax = xg.z+dx2;
+		if( xc.z < bmin ) dmin += R_SQR(xc.z - bmin ); else
+		if( xc.z > bmax ) dmin += R_SQR(xc.z - bmax );
+		
+		if( dmin <= r2 ) return true;
+		return false;
+	}
+	
+	template< typename Real_ >
+	inline bool shell_intersection( const vec<Real_>& xg, double dx2, const vec<Real_>& xc, Real_ r1_2, Real_ r2_2 )
+	{
+		Real_ dmax = 0, dmin = 0, a, b, bmin, bmax;
+		if( r1_2 > r2_2 ) std::swap(r1_2,r2_2);
+			
+		//.. x ..//
+		bmin = xg.x-dx2;
+		bmax = xg.x+dx2;
+		a = R_SQR( xc.x - bmin );
+		b = R_SQR( xc.x - bmax );
+		dmax += std::max( a, b );
+		if( xc.x < bmin ) dmin += a; else
+		if( xc.x > bmax ) dmin += b;
+		
+		//.. y ..//
+		bmin = xg.y-dx2;
+		bmax = xg.y+dx2;
+		a = R_SQR( xc.y - bmin );
+		b = R_SQR( xc.y - bmax );
+		dmax += std::max( a, b );
+		if( xc.y < bmin ) dmin += a; else
+		if( xc.y > bmax ) dmin += b;
+			
+		//.. z ..//
+		bmin = xg.z-dx2;
+		bmax = xg.z+dx2;
+		a = R_SQR( xc.z - bmin );
+		b = R_SQR( xc.z - bmax );
+		dmax += std::max( a, b );
+		if( xc.z < bmin ) dmin += a; else
+		if( xc.z > bmax ) dmin += b;
+		
+		
+		if( dmin <= r2_2 && r1_2 <= dmax ) return true;
+		return false;
+	}
+	
+	template< typename Real_ >
+	inline bool sphere_intersection( const vec<Real_>& xg, double dx2, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dmax = 0, dmin = 0, a, b, bmin, bmax;
+		
+		//.. x ..//
+		bmin = xg.x-dx2;
+		bmax = xg.x+dx2;
+		a = R_SQR( xc.x - bmin );
+		b = R_SQR( xc.x - bmax );
+		dmax += std::max( a, b );
+		if( xc.x < bmin ) dmin += a; else
+		if( xc.x > bmax ) dmin += b;
+		
+		//.. y ..//
+		bmin = xg.y-dx2;
+		bmax = xg.y+dx2;
+		a = R_SQR( xc.y - bmin );
+		b = R_SQR( xc.y - bmax );
+		dmax += std::max( a, b );
+		if( xc.y < bmin ) dmin += a; else
+		if( xc.y > bmax ) dmin += b;
+			
+		//.. z ..//
+		bmin = xg.z-dx2;
+		bmax = xg.z+dx2;
+		a = R_SQR( xc.z - bmin );
+		b = R_SQR( xc.z - bmax );
+		dmax += std::max( a, b );
+		if( xc.z < bmin ) dmin += a; else
+		if( xc.z > bmax ) dmin += b;
+		
+		
+		if( dmin <= r2 && r2 <= dmax ) return true;
+		return false;
+	}
+	
+	#undef R_SQR
+	
+public:
+	
+	//! low-level constructor - should not be called from outside because then you can really screw up things
+	/*!
+	 * @param snap the associated RAMSES::snapshot object
+	 * @param cpu domain for which to read the AMR tree
+	 * @param maxlevel maximum refinement level to consider
+	 * @param minlevel minimum refinement level to consider (default=1)
+	 */
+	tree( RAMSES::snapshot& snap, int cpu, int maxlevel, int minlevel=1 )
+	: m_cpu( cpu ), m_minlevel( minlevel ), m_maxlevel( maxlevel ), m_fname( rename_info2amr(snap.m_filename) )
+	{ 
+		read_header();
+
+		if( cpu > m_header.ncpu || cpu <= 0)
+			throw std::runtime_error("RAMSES_particle_data: expect to read from out of range CPU.");
+		
+	}
+	
+	//! perform the read operation of AMR data
+	void read( void );
+	
+	//! end const_iterator for given refinement level
+	const_iterator end( int ilevel ) const
+	{ 
+		if( ilevel <= m_maxlevel )
+			return const_iterator( ilevel, m_cpu, m_AMR_levels.at(ilevel).end(), this );
+		else
+			return const_iterator( ilevel, m_cpu, m_AMR_levels.at(0).end(), this );
+	}
+	
+	//! end iterator for given refinement level
+	iterator end( int ilevel )
+	{ 
+		if( ilevel <= m_maxlevel )
+			return iterator( ilevel, m_cpu, m_AMR_levels.at(ilevel).end(), this ); 
+		else
+			return iterator( ilevel, m_cpu, m_AMR_levels.at(0).end(), this ); 
+	}
+	
+	//! begin const_iterator for given refinement level
+	const_iterator begin( int ilevel ) const
+	{	
+		if( ilevel <= m_maxlevel )
+			return const_iterator( ilevel, m_cpu, m_AMR_levels.at(ilevel).begin(), this ); 
+		else
+			return this->end(ilevel);
+	}
+	
+	//! begin iterator for given refinement level
+	iterator begin( int ilevel )
+	{	
+		if( ilevel <= m_maxlevel )
+			return iterator( ilevel, m_cpu, m_AMR_levels.at(ilevel).begin(), this ); 
+		else
+			return this->end(ilevel);
+	}
+	
+	
+	//! return the center of a child cell associated with a grid iterator
+	/*!
+	 * @param it grid iterator
+	 * @param ind sub-cell index (0..7)
+	 * @return vec vector containing the coordinates
+	 */
+	template< typename Real_ >
+	inline vec<Real_> cell_pos( const iterator& it, unsigned ind )
+	{
+		vec<Real_> pos;
+		pos.x = (*it).m_xg[0]+m_AMR_levels[it.m_ilevel].m_xc[ind];
+		pos.y = (*it).m_xg[1]+m_AMR_levels[it.m_ilevel].m_yc[ind];
+		pos.z = (*it).m_xg[2]+m_AMR_levels[it.m_ilevel].m_zc[ind];
+		return pos;
+	}
+	
+	//! return the center of the grid associated with a grid iterator
+	/*!
+	 * @param it grid iterator
+	 * @return vec vector containing the coordinates
+	 */
+	template< typename Real_ >
+	inline vec<Real_> grid_pos( const iterator& it )
+	{
+		vec<Real_> pos;
+		pos.x = (*it).m_xg[0];
+		pos.y = (*it).m_xg[1];
+		pos.z = (*it).m_xg[2];
+		return pos;
+	}
+	
+	template< typename Real_ >
+	inline bool ball_intersects_grid( const iterator& it, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level());
+		vec<Real_> xg = grid_pos<Real_>(it);
+		return ball_intersection( xg, dx2, xc, r2 );
+	}
+	
+	template< typename Real_ >
+	inline bool ball_intersects_cell( const iterator& it, char ind, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level()+1);
+		vec<Real_> xg = cell_pos<Real_>(it,ind);
+		return ball_intersection( xg, dx2, xc, r2 );
+	}
+		
+	template< typename Real_ >
+	inline bool shell_intersects_grid( iterator& it, const vec<Real_>& xc, Real_ r1_2, Real_ r2_2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level());
+		vec<Real_> xg = grid_pos<Real_>(it);
+		return shell_intersection( xg, dx2, xc, r1_2, r2_2 );
+	}
+	
+	template< typename Real_ >
+	inline bool shell_intersects_cell( iterator& it, char ind, const vec<Real_>& xc, Real_ r1_2, Real_ r2_2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level()+1);
+		vec<Real_> xg = cell_pos<Real_>(it,ind);
+		return shell_intersection( xg, dx2, xc, r1_2, r2_2 );
+	}
+		
+	template< typename Real_ >
+	inline bool sphere_intersects_grid( const iterator& it, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level());
+		vec<Real_> xg = grid_pos<Real_>(it);
+		return sphere_intersection( xg, dx2, xc, r2 );
+	}
+	
+	template< typename Real_ >
+	inline bool sphere_intersects_cell( const iterator& it, char ind, const vec<Real_>& xc, Real_ r2 )
+	{
+		Real_ dx2 = 0.5/pow(2,it.get_level()+1);
+		vec<Real_> xg = cell_pos<Real_>(it,ind);
+		return sphere_intersection( xg, dx2, xc, r2 );
+	}
+	
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+template< class Cell_, class Level_ >
+void tree<Cell_,Level_>::read_header( void )
+{
+	FortranUnformatted ff( gen_fname(m_cpu) );
+	
+	//-- read header data --//
+	
+	ff.read( m_header.ncpu );
+	ff.read( m_header.ndim );
+	ff.read<unsigned>( std::back_inserter(m_header.nx) );
+	ff.read( m_header.nlevelmax );
+	ff.read( m_header.ngridmax );
+	ff.read( m_header.nboundary );
+	ff.read( m_header.ngrid_current );
+	ff.read( m_header.boxlen );
+	
+	ff.read<unsigned>( std::back_inserter(m_header.nout) );
+	ff.read<double>( std::back_inserter(m_header.tout) );
+	ff.read<double>( std::back_inserter(m_header.aout) );
+	ff.read( m_header.t );
+	ff.read<double>( std::back_inserter(m_header.dtold) );
+	ff.read<double>( std::back_inserter(m_header.dtnew) );
+	ff.read<unsigned>( std::back_inserter(m_header.nsteps) );
+	ff.read<double>( std::back_inserter(m_header.stat) );
+	ff.read<double>( std::back_inserter(m_header.cosm) );
+	ff.read<double>( std::back_inserter(m_header.timing) );
+	ff.read( m_header.mass_sph );
+	
+	m_ncoarse = m_header.nx[0]*m_header.nx[1]*m_header.nx[2];
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+template< class Cell_, class Level_ >
+std::string tree<Cell_,Level_>::gen_fname( int icpu )
+{
+	std::string fname;
+	char ext[32];
+	fname = m_fname;
+	fname.erase(fname.rfind('.')+1);
+	sprintf(ext,"out%05d",icpu);
+	fname.append(std::string(ext));
+	return fname;
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+template< class Cell_, class Level_ >
+std::string tree<Cell_,Level_>::rename_info2amr( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("info");
+	amr = info.substr(0,ii)+"amr" + info.substr(ii+4, 6) + ".out00001";
+	return amr;
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+template< class Cell_, class Level_ >
+void tree<Cell_,Level_>::read( void )
+{
+	// indexing map used to associate RAMSES internal cell IDs with new IDs
+	std::map<unsigned,unsigned> m_ind_grid_map;
+
+	std::vector<int> cell_cpu;
+	std::vector<unsigned> cell_level;
+	std::vector<unsigned> itmp;
+	
+	
+	typename std::vector< Level_ >::iterator amr_level_it;
+	
+	FortranUnformatted ff( gen_fname( m_cpu ) );
+		
+	//.. skip header entries ..//
+	ff.skip_n_from_start( 19 ); 			//.. skip header 
+		
+	//+ headl + taill
+	ff.read<int>( std::back_inserter(m_headl) );
+	ff.read<int>( std::back_inserter(m_taill) );
+	ff.read<int>( std::back_inserter(m_numbl) );
+	
+	//.. skip numbtot
+	ff.skip_n( 1 ); 						
+		
+	std::vector<int> ngridbound;
+	if( m_header.nboundary > 0 ){
+		ff.skip_n( 2 ); 					//.. skip headb and tailb 
+		ff.read<int>( std::back_inserter(ngridbound) ); 				//.. read numbb
+	}
+		
+	ff.skip_n( 6 ); //..skip (1)free_mem+(2)ordering+(3)bound_key+
+					//..     (4)coarse_son+(5)coarse_flag1+(6)coarse_cpu_map
+	
+	
+	if( /*m_minlevel < 1 ||*/ m_maxlevel > m_header.nlevelmax || m_minlevel > m_maxlevel )
+		throw std::runtime_error("RAMSES_amr_level::read_level : requested level is invalid.");
+		
+
+	m_ind_grid_map.insert( std::pair<unsigned,unsigned>(0,ENDPOINT) );
+	
+	FortranUnformatted::streampos spos = ff.tellg();
+	
+	m_minlevel = 0;
+	
+	//... create indexing map ...//
+	for( int ilvl = 0; ilvl<=std::min(m_maxlevel+1, m_header.nlevelmax); ++ilvl ){
+		unsigned gridoff = 0;
+		for( int icpu=0; icpu<m_header.ncpu+m_header.nboundary; ++icpu ){
+			if( icpu < m_header.ncpu && m_numbl[ACC_NL(icpu,ilvl)] == 0 )
+				continue;
+			else if( icpu >= m_header.ncpu && ngridbound[ACC_NB(icpu-m_header.ncpu,ilvl)] == 0 )
+				continue;
+				
+			if( ilvl >= m_minlevel ){
+				std::vector<int> ind_grid;
+				ff.read<int>( std::back_inserter(ind_grid) );
+				for( unsigned i=0; i<ind_grid.size(); ++i ){
+					m_ind_grid_map.insert( std::pair<unsigned,unsigned>(ind_grid[i],gridoff++) );
+				}
+				ind_grid.clear();
+			}else
+				ff.skip();
+				
+			ff.skip_n( 3+3+6+8+8+8 );
+		}
+		if( ff.eof() ){
+			//std::cerr << "eof reached in fortran read operation\n";
+			m_maxlevel = ilvl;//+1;
+			break;
+		}
+	}
+	
+	ff.seekg( spos );
+	
+	m_AMR_levels.clear();
+	
+	 
+	//... loop over levels ...//
+	for( int ilvl = 0; ilvl<=m_maxlevel; ++ilvl ){
+		m_AMR_levels.push_back( Level_(ilvl) );
+		Level_ &currlvl = m_AMR_levels.back();
+			
+		for( int icpu=0; icpu<m_header.ncpu+m_header.nboundary; ++icpu ){
+			if( icpu < m_header.ncpu && m_numbl[ACC_NL(icpu,ilvl)] == 0 )
+				continue;
+			else if( icpu >= m_header.ncpu && ngridbound[ACC_NB(icpu-m_header.ncpu,ilvl)] == 0 )
+				continue;
+			
+			if( ilvl >= m_minlevel ){
+				unsigned gridoff = currlvl.size();
+			
+				std::vector<int> ind_grid;
+				ff.read<int>( std::back_inserter(ind_grid) );
+				for( unsigned i=0; i<ind_grid.size(); ++i ){
+					currlvl.register_cell( Cell_() );
+					//.. also set owning cpu in this loop...
+					currlvl[ i+gridoff ].m_cpu = icpu+1;
+				}
+			
+				//... pointers to next and previous octs ..//
+				ff.skip();
+				ff.skip();
+			
+				//... oct x-coordinates ..//
+				std::vector<float> ftmp;
+				ff.read<double>( std::back_inserter(ftmp) );
+				for( unsigned j=0; j<ftmp.size(); ++j )
+					currlvl[ j+gridoff ].m_xg[0] = ftmp[j];
+				ftmp.clear();
+			
+				//... oct y-coordinates ..//
+				ff.read<double>( std::back_inserter(ftmp) );
+				for( unsigned j=0; j<ftmp.size(); ++j )
+					currlvl[ j+gridoff ].m_xg[1] = ftmp[j];
+				ftmp.clear();
+			
+				//... oct z-coordinates ..//
+				ff.read<double>( std::back_inserter(ftmp) );
+				for( unsigned j=0; j<ftmp.size(); ++j )
+					currlvl[ j+gridoff ].m_xg[2] = ftmp[j];
+				ftmp.clear();
+			
+			
+				//... father indices
+				if( is_locally_essential<Cell_>::check ){
+					ff.read<int>( std::back_inserter(itmp) ); 
+					for( unsigned j=0; j<itmp.size(); ++j ){
+						currlvl[ j+gridoff ].m_pos    = (itmp[j]-m_ncoarse-1)/m_header.ngridmax;
+						currlvl[ j+gridoff ].m_father = m_ind_grid_map[ (itmp[j]-m_ncoarse)%m_header.ngridmax ];
+					}
+					itmp.clear();
+				}else
+					ff.skip();
+				
+				
+				//... neighbour grids indices
+				if( is_locally_essential<Cell_>::check )
+				{
+					for( unsigned k=0; k<6; ++k ){
+						ff.read<int>( std::back_inserter(itmp) );
+						for( unsigned j=0; j<itmp.size(); ++j )
+							currlvl[j+gridoff].m_neighbour[k] = m_ind_grid_map[ (itmp[j]-m_ncoarse)%m_header.ngridmax ];
+						itmp.clear();
+					}
+				}else
+					ff.skip_n( 6 );
+					
+			
+				//... son cell indices
+				for( unsigned ind=0; ind<8; ++ind ){
+					ff.read<int>( std::back_inserter(itmp) );
+					for( unsigned k=0; k<itmp.size(); ++k ){
+						currlvl[k+gridoff].m_son[ind] = m_ind_grid_map[ itmp[k] ];
+					}
+					itmp.clear();
+				}
+
+				//.. skip cpu + refinement map
+				ff.skip_n( 8+8 );
+			}else{
+				//...skip entire record
+				ff.skip_n( 3+3+1+6+8+8+8 );		
+			}
+		}
+	}
+}
+
+} //namespace AMR
+} //namespace RAMSES
+
+
+
+#undef FIX
+
+#endif //__RAMSES_AMR_DATA_HH
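For orientation, a sketch of how the tree and its grid iterators are meant to be used together. It assumes a tree that has already been constructed for one domain from a RAMSES::snapshot (declared in RAMSES_info.hh) and populated with read(); the function name and the level argument are illustrative only.

#include "RAMSES_amr_data.hh"

typedef RAMSES::AMR::cell_locally_essential<>  cell_t;
typedef RAMSES::AMR::level< cell_t >           level_t;
typedef RAMSES::AMR::tree< cell_t, level_t >   tree_t;

//! count the octs on refinement level 'ilevel' that have at least one refined child cell
unsigned count_refined_octs( tree_t& t, int ilevel )
{
	unsigned nrefined = 0;
	for( tree_t::iterator it = t.begin(ilevel); it != t.end(ilevel); ++it ){
		for( int ison=0; ison<8; ++ison ){        // assuming a 3D run: 8 child cells per oct
			if( it.is_refined(ison) ){
				++nrefined;
				break;
			}
		}
	}
	return nrefined;
}

Construction itself follows the constructor shown above, roughly tree_t t( snap, icpu, maxlevel ); t.read();, after which begin(ilevel) and end(ilevel) bracket the octs stored for that level.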

Added: trunk/yt/ramses_headers/RAMSES_hydro_data.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_hydro_data.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,666 @@
+/*
+		This file is part of libRAMSES++
+			a C++ library to access snapshot files
+			generated by the simulation code RAMSES by R. Teyssier
+
+    Copyright (C) 2008-09  Oliver Hahn, ojha at gmx.de
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RAMSES_HYDRO_DATA_HH
+#define __RAMSES_HYDRO_DATA_HH
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <cmath>
+
+#include "FortranUnformatted_IO.hh"
+#include "RAMSES_info.hh"
+#include "RAMSES_amr_data.hh"
+
+namespace RAMSES{
+namespace HYDRO{
+
+//! internal hydro variable indices
+enum hydro_var
+{
+	density     = 1,
+	velocity_x  = 2,
+	velocity_y  = 3,
+	velocity_z  = 4,
+	pressure    = 5,
+	metallicity = 6
+};
+
+//! names of possible variables stored in a RAMSES hydro file
+const char ramses_hydro_variables[][64] = {
+	{"density"},
+	{"velocity_x"},
+	{"velocity_y"},
+	{"velocity_z"},
+	{"pressure"},
+	{"metallicity"} };
+
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+/*!
+ * @class RAMSES::HYDRO::proto_data
+ * @brief base class for all things hydro
+ *
+ * This class provides the base class for all cell based variables defined
+ * on the AMR mesh, accessible through the tree data structure.
+ * @sa RAMSES::HYDRO::data, RAMSES::HYDRO::empty_data
+ */
+template< typename TreeType_, typename ValueType_=double >
+class proto_data{
+
+public:
+	std::vector< std::vector<ValueType_> > m_var_array;
+protected:
+	TreeType_& m_tree;  //!< reference to underlying AMR tree structure
+	unsigned			m_cpu;			//!< the computational domain
+	unsigned
+		m_minlevel, 				//!< minimum refinement level to be read from file
+		m_maxlevel;					//!< maximum refinement level to be read from file
+	unsigned 			m_twotondim;//!< 2**ndim
+	unsigned			m_ilevel; 	//!< the refinement level
+
+	//! array holding the actual data
+
+public:
+	
+	//! constructor for the base class of all hydro data objects
+	/*! 
+	 * @param AMRtree reference to the underlying AMR tree data structure object
+	 */
+	explicit proto_data( TreeType_& AMRtree )
+	: m_tree(AMRtree), m_cpu( AMRtree.m_cpu ), 
+	  m_minlevel( AMRtree.m_minlevel ), m_maxlevel( AMRtree.m_maxlevel ),
+		m_twotondim( (unsigned)(pow(2, AMRtree.m_header.ndim)+0.5) )
+	{ }
+
+	//! access the value of the cells associated with the oct designated by the iterator
+	/*!
+	 * @param it the grid iterator pointing to the current oct
+	 * @param ind index of the child cell of the current oct (0..7)
+	 */
+	inline ValueType_& cell_value( const typename TreeType_::iterator& it, int ind )
+	{
+		unsigned ipos   = it.get_absolute_position();
+		unsigned ilevel = it.get_level();//-m_minlevel;
+		return (m_var_array[ilevel])[m_twotondim*ipos+ind];
+	}
+
+	//! access the value of the cells associated with the oct designated by the iterator
+	/*!
+	 * @param it the grid iterator pointing to the current oct
+	 * @param ind index of the child cell of the current oct (0..7)
+	 */
+	inline ValueType_& operator()( const typename TreeType_::iterator& it, int ind )
+	{	return cell_value(it,ind); }
+	
+	
+	//! combines all elements of this instance with that of another using a binary operator
+	/*!
+	 * @param o  the other data object with which to combine the elements
+	 * @param op the binary operator to be used in combining elements
+	 */
+	template<typename BinaryOperator_>
+	void combine_with( const proto_data<TreeType_,ValueType_>& o, const BinaryOperator_& op )
+	{
+		if( m_minlevel != o.m_minlevel || m_maxlevel != o.m_maxlevel 
+				|| m_twotondim != o.m_twotondim || &m_tree != &o.m_tree 
+				|| m_var_array.size() != o.m_var_array.size() ){
+			std::cerr << "this #levels=" << m_var_array.size() << ", other #levels=" << o.m_var_array.size() << std::endl;
+			throw std::runtime_error("Error: trying to combine incompatible mesh data.");
+		}
+		
+		for( unsigned ilvl=0; ilvl<m_var_array.size(); ++ilvl )
+		{
+			if( m_var_array[ilvl].size() != o.m_var_array[ilvl].size() ){
+				std::cerr << "ilvl=" << ilvl << ", this size=" << m_var_array[ilvl].size() << ", other size=" << o.m_var_array[ilvl].size() << std::endl;
+				throw std::runtime_error("Error: trying to combine incompatible mesh data.");
+			}
+			
+			for( unsigned i=0; i<m_var_array[ilvl].size(); ++i )
+				m_var_array[ilvl][i] = op( m_var_array[ilvl][i], o.m_var_array[ilvl][i] );
+		}
+		
+	}
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+/*!
+ * @class RAMSES::HYDRO::empty_data
+ * @brief encapsulates additional (non-RAMSES) hydro data
+ *
+ * This class provides a wrapper for additional hydro variables created
+ * during post-processing. They are stored compatibly with the normal
+ * RAMSES hydro variables and thus can be accessed via the tree.
+ * @sa RAMSES::HYDRO::data, RAMSES::HYDRO::proto_data
+ */
+template< typename TreeType_, typename ValueType_=double >
+class empty_data : public proto_data<TreeType_,ValueType_>{
+	
+public:
+
+	//! constructor for an empty hydro variable defined on the AMR mesh
+	/*!
+	 * @param AMRtree the underlying hierarchical tree data structure
+	 * @param v the value with which the variable should be initialized, default is a (double) zero.
+	 */
+	explicit empty_data( TreeType_& AMRtree, ValueType_ v=(ValueType_)0.0 )
+	: proto_data<TreeType_,ValueType_>(AMRtree)
+	{
+		this->m_var_array.assign( this->m_maxlevel+1, std::vector<ValueType_>() );
+		
+		for( unsigned ilvl = this->m_minlevel; ilvl<=this->m_maxlevel; ++ilvl ){
+			typename TreeType_::iterator grid_it = this->m_tree.begin( ilvl );
+			while( grid_it != this->m_tree.end(ilvl) ){
+
+				for( unsigned j=0;j<this->m_twotondim;++j )
+					this->m_var_array[ilvl].push_back( v );
+
+				++grid_it;
+			}
+		}
+	}
+
+
+	//! write the new hydro variable to a RAMSES compatible output file
+	/*!
+	 * writes the hydro variable to a RAMSES compatible output file named
+	 * after the convention
+	 *  (path)/(basename)_(snap_num).out(domain)
+	 *
+	 * @param path the path where to store the files
+	 * @param basename the filename base string to prepend to the snapshot number
+	 * @param snap_num the number of the snapshot (default is zero).
+	 */
+	void save( std::string path, std::string basename, unsigned snap_num=0 )
+	{
+		char fullname[256];
+		sprintf(fullname,"%s/%s_%05d.out%05d",path.c_str(),basename.c_str(), snap_num, this->m_tree.m_cpu );
+		std::ofstream ofs( fullname, std::ios::binary|std::ios::trunc );
+		
+		
+		typename TreeType_::iterator it;
+		unsigned ncpu = this->m_tree.m_header.ncpu;
+		
+		//std::cerr << "ncpu = " << ncpu << std::endl;
+
+		for( unsigned ilvl = 0; ilvl<=this->m_maxlevel; ++ilvl ){
+			std::vector< std::vector<ValueType_> > 
+				temp1 (ncpu, std::vector<ValueType_>() ),
+				temp2 (ncpu, std::vector<ValueType_>() ),
+				temp3 (ncpu, std::vector<ValueType_>() ),
+				temp4 (ncpu, std::vector<ValueType_>() ),
+				temp5 (ncpu, std::vector<ValueType_>() ),
+				temp6 (ncpu, std::vector<ValueType_>() ),
+				temp7 (ncpu, std::vector<ValueType_>() ),
+				temp8 (ncpu, std::vector<ValueType_>() );
+			
+			it = this->m_tree.begin(ilvl);
+			
+			while( it!= this->m_tree.end(ilvl) )
+			{
+				temp1[ it.get_domain()-1 ].push_back( (*this)(it,0) );
+				temp2[ it.get_domain()-1 ].push_back( (*this)(it,1) );
+				temp3[ it.get_domain()-1 ].push_back( (*this)(it,2) );
+				temp4[ it.get_domain()-1 ].push_back( (*this)(it,3) );
+				temp5[ it.get_domain()-1 ].push_back( (*this)(it,4) );
+				temp6[ it.get_domain()-1 ].push_back( (*this)(it,5) );
+				temp7[ it.get_domain()-1 ].push_back( (*this)(it,6) );
+				temp8[ it.get_domain()-1 ].push_back( (*this)(it,7) );
+				++it;
+			}
+			
+			
+			for( unsigned icpu = 0; icpu<ncpu; ++icpu ){
+				unsigned nn;
+				
+				nn = temp1[icpu].size() * sizeof( ValueType_ );
+				
+				if( nn > 0 )
+				{
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp1[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp2[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp3[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp4[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp5[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp6[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp7[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+					ofs.write( (char*)&temp8[icpu][0], nn );
+					ofs.write( (char*)&nn, sizeof(unsigned) );
+				}
+			}
+		}
+	}
+	
+	//! reads an additional variable from a RAMSES-compatible (single-variable) output file
+	/*!
+	 * @param basename the base string used for the variable
+	 */
+	void read( std::string basename )
+	{
+		char fullname[256];
+		sprintf(fullname,"%s_%05d.out%05d",basename.c_str(), this->m_tree.m_header.nout[0], this->m_tree.m_cpu );
+		std::ifstream ifs( fullname, std::ios::binary );
+
+		for( unsigned ilvl = this->m_minlevel; ilvl<=this->m_maxlevel; ++ilvl ){
+			unsigned nn = this->m_var_array[ilvl].size() * sizeof(ValueType_);
+			unsigned nnfile;
+			ifs.read( (char*)&nnfile, sizeof(unsigned) );
+			if( nn != nnfile ){
+				std::cerr << "Error: dimension mismatch between AMR tree and file data!" << std::endl;
+				std::cerr << "       found " << nnfile << ", expected " << nn << " in file \'" << fullname << "\'" << std::endl;
+				return;
+			}
+			ifs.read( (char*)&this->m_var_array[ilvl][0], nn );
+			ifs.read( (char*)&nn, sizeof(unsigned) );
+		}
+		ifs.close();
+	}
+
+};
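To make the empty_data workflow concrete, a sketch that attaches a new post-processing cell variable to an already-read tree, fills it per cell, and writes it out through save(). The variable name "leveltag", the initial value and the snapshot number are invented for illustration; ilevel must lie within the level range actually read into the tree.

#include "RAMSES_amr_data.hh"
#include "RAMSES_hydro_data.hh"

typedef RAMSES::AMR::cell_locally_essential<>        cell_t;
typedef RAMSES::AMR::level< cell_t >                 level_t;
typedef RAMSES::AMR::tree< cell_t, level_t >         tree_t;
typedef RAMSES::HYDRO::empty_data< tree_t, double >  extra_t;

//! tag every cell on one level with its refinement level and dump the result per domain
void write_level_tag( tree_t& t, int ilevel )
{
	extra_t tag( t, 0.0 );                       // one value per cell, initialised to zero

	for( tree_t::iterator it = t.begin(ilevel); it != t.end(ilevel); ++it )
		for( int ind=0; ind<8; ++ind )           // assuming a 3D run: 8 cells per oct
			tag( it, ind ) = (double)ilevel;     // operator() returns a writable reference

	tag.save( ".", "leveltag", 1 );              // -> ./leveltag_00001.outNNNNN, NNNNN = domain
}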
+
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+/*!
+ * @class data
+ * @brief encapsulates hydro data from a RAMSES simulation snapshot
+ *
+ * This class provides low-level read access to RAMSES hydro_XXXXX.out files.
+ * Data from a given list of computational domains can be read and is
+ * stored in internal datastructures.
+ * Access to cell position and threaded tree structure of the cell is provided
+ * through the member functions of class RAMSES_amr_level.
+ * @sa RAMSES_amr_level
+ */
+template< typename TreeType_, typename Real_=double >
+class data : public proto_data<TreeType_,Real_>{
+
+public:
+	struct header{
+		unsigned ncpu;		//!< number of CPUs in simulation
+		unsigned nvar;		//!< number of hydrodynamic variables
+		unsigned ndim;		//!< number of spatial dimensions
+		unsigned nlevelmax;	//!< maximum allowed refinement level
+		unsigned nboundary;	//!< number of boundary regions
+		double gamma;	//!< adiabatic exponent
+	};
+
+
+	std::string 	m_fname;		//!< the file name	
+	struct header	m_header;	 	//!< header meta data
+
+	const unsigned m_nvars; //!< number of variables stored in file
+
+	std::vector<std::string> m_varnames;	//!< names of the variables stored in file
+	std::map<std::string,unsigned> m_var_name_map; //!< a hash table for variable name to internal variable index
+
+
+protected:
+
+	//! generates a hydro_XXXX filename for specified cpu
+	std::string gen_fname( int icpu );
+
+	//! generate hydro_XXXXX filename from info filename
+	std::string rename_info2hydro( const std::string& info );
+
+	//! generate hydro_XXXXX filename from amr filename
+	std::string rename_amr2hydro( const std::string& info );
+
+	//! read header data containing meta information
+	void read_header( void );
+
+	//! get internal index for given variable string identifier
+	/*!
+	 * @param varname the string identifier of the hydro variable
+	 * @return internal variable index
+	 */
+	int get_var_idx( const std::string& varname )
+	{
+		int ivar;
+		std::map<std::string,unsigned>::iterator mit;
+		if( (mit=m_var_name_map.find(varname)) != m_var_name_map.end() )
+			ivar = (*mit).second;
+		else
+			throw std::runtime_error("RAMSES::HYDRO::data::get_var_idx :"\
+				" Error, cannot find variable named \'"+varname+"\'");
+		return ivar;
+	}
+
+	//! perform read operation of one hydro variable (internal use)
+	/*!
+	 * users should always call the read( std::string ) member function
+	 * and read variables through their string identifiers
+	 * @param var the index of the hydro variable
+	 */
+	void read( unsigned var );
+
+public:
+
+	//! constructor for hydro data
+	/*!
+	 * @param AMRtree reference to the underlying tree object
+	 */
+	explicit data( TreeType_& AMRtree )
+	: proto_data<TreeType_,Real_>( AMRtree ),
+	  m_fname( rename_amr2hydro(AMRtree.m_fname) ),
+		m_nvars( 6 )
+	{
+		read_header();
+
+		if( this->m_cpu > m_header.ncpu || this->m_cpu < 1 )
+			throw std::runtime_error("RAMSES::HYDRO::data : expect to read from out of range CPU.");
+
+		if( this->m_minlevel < 0 || this->m_maxlevel >= m_header.nlevelmax )
+			throw std::runtime_error("RAMSES::HYDRO::data : requested level is invalid.");
+
+		//m_twotondim = (unsigned)(pow(2,m_header.ndim)+0.5);
+
+		for( unsigned i=0; i<m_nvars; ++i ){
+				m_var_name_map.insert( std::pair<std::string,unsigned>( ramses_hydro_variables[i], i+1 ) );
+				m_varnames.push_back( ramses_hydro_variables[i] );
+			}
+	}
+
+	//! retrieve the names of variables available in the hydro data source
+	/*! Variables have an internal index but are accessed through their name, given as a string.
+	 *  Hydro data files may contain different variables depending on the kind of simulation run.
+	 * @param names An output iterator to which std::string designating available variables are sent
+	 * @return Final position of the output iterator
+	 */
+	template< typename _OutputIterator >
+	_OutputIterator get_var_names( _OutputIterator names )
+	{
+		std::vector<std::string>::iterator it( m_varnames.begin() );
+		while( it != m_varnames.end() ){
+			*names = *it;
+			++it; ++names;
+		}
+		return names;
+	}
+
+	//! perform read operation of one hydro variable
+	/*!
+	 * @param varname the string identifier of the hydro variable
+	 */
+	void read( std::string varname )
+	{	this->read( get_var_idx( varname ) );  }
+
+};
+
+
+template< typename TreeType_, typename Real_ >
+void data<TreeType_,Real_>::read_header( void )
+{
+	FortranUnformatted ff( gen_fname( this->m_cpu ) );
+
+	//-- read header data --//
+	ff.read( m_header.ncpu );
+	ff.read( m_header.nvar );
+	ff.read( m_header.ndim );
+	ff.read( m_header.nlevelmax );
+	ff.read( m_header.nboundary );
+	ff.read( m_header.gamma );
+}
+
+
+template< typename TreeType_, typename Real_ >
+void data<TreeType_,Real_>::read( unsigned var )
+{
+	this->m_var_array.clear();
+	//int twotondim = (int)(pow(2,m_header.ndim)+0.5);
+
+	FortranUnformatted ff( gen_fname( this->m_cpu ) );
+
+	//.. skip header entries ..//
+	ff.skip_n_from_start( 6 ); //.. skip header
+
+	if( var < 1 || var > m_header.nvar )
+		throw std::runtime_error("RAMSES::HYDRO::data::read : requested variable is invalid in file \'"+m_fname+"\'.");
+
+	this->m_var_array.clear();
+
+	for( unsigned ilvl = 0; ilvl<=this->m_maxlevel; ++ilvl ){
+
+		this->m_var_array.push_back( std::vector<Real_>() );
+
+		for( unsigned icpu = 0; icpu<m_header.ncpu+m_header.nboundary; ++icpu ){
+
+				unsigned file_ilevel, file_ncache;
+				ff.read(file_ilevel);
+				ff.read(file_ncache);
+
+				if( file_ncache == 0 )
+						continue;
+
+			if( ilvl >= this->m_minlevel ){
+				if( file_ilevel != ilvl+1 )
+					throw std::runtime_error("RAMSES::HYDRO::data::read : corrupted file " \
+						 "or file seek failure in file \'"+m_fname+"\'.");
+
+
+				std::vector<float> tmp;
+				for( unsigned i=0; i<this->m_twotondim; ++i )
+				{
+					ff.skip_n( var-1 );
+					ff.read<double>( std::back_inserter(tmp) );
+					ff.skip_n( m_header.nvar-var );
+				}
+				//.. reorder array to increase data locality..//
+				this->m_var_array.back().reserve( this->m_var_array.back().size() + tmp.size() );
+				for( unsigned i=0; i<file_ncache; ++i ){
+					for( unsigned j=0; j<this->m_twotondim; ++j ){
+						(this->m_var_array.back()).push_back( tmp[i+j*file_ncache] );
+					}
+				}
+			}else{
+				ff.skip_n( this->m_twotondim*m_header.nvar );
+			}
+		}
+	}
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::gen_fname( int icpu )
+{
+	std::string fname;
+	char ext[32];
+	fname = m_fname;
+	fname.erase(fname.rfind('.')+1);
+	sprintf(ext,"out%05d",icpu);
+	fname.append(std::string(ext));
+	return fname;
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::rename_info2hydro( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("info");
+	amr = info.substr(0,ii)+"hydro" + info.substr(ii+4, 6) + ".out00001";
+	return amr;
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::rename_amr2hydro( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("amr");
+	amr = info.substr(0,ii)+"hydro" + info.substr(ii+3, 6) + ".out00001";
+	return amr;
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+
+/*! 
+ * @class RAMSES::HYDRO::multi_domain_data
+ * @brief encapsulates hydro data from multiple computational domains
+ *
+ * This class provides high-level data access to bundled domain data.  Bundling
+ * of domain data is useful when analysis of snapshots is performed in parallel
+ * but on a number of cores different from that used for the RAMSES simulation.
+ */
+template< typename TreeType_, typename DataType_, typename ValueType_=double >
+class multi_domain_data
+{
+	public:
+	
+		unsigned m_ndomains;				//!< number of domains bundled
+		RAMSES::snapshot &m_rsnap;			//!< reference to the underlying snapshot object
+		std::vector<DataType_*> m_data;		//!< vector of bundled data objects
+		std::vector<TreeType_*> m_ptrees;	//!< vector of bundled tree objects
+		
+	public:
+	
+		//! constructor for bundled multi-domain hydro variables
+		/*!
+		 * @param rsnap reference to the underlying snapshot object
+		 * @param ptrees vector of trees to be bundled
+		 */
+		multi_domain_data( RAMSES::snapshot& rsnap, std::vector<TreeType_*> ptrees )
+		: m_ndomains( ptrees.size() ), m_rsnap(rsnap), m_ptrees( ptrees )
+		{ 
+			for( unsigned idom=0; idom<m_ndomains; ++idom )
+				m_data.push_back( new DataType_(*m_ptrees[idom]) );
+			
+		}
+		
+		
+		//! combines all elements of this instance with that of another using a binary operator
+		/*!
+	 	 * @param o  the other multi-domain data object with which to combine the elements
+	 	 * @param op the C++ binary operator to be used in combining elements
+	 	 */
+		template<typename BinaryOperator, typename OtherDataType_>
+		void combine_with( const multi_domain_data<TreeType_,OtherDataType_,ValueType_>& o, const BinaryOperator& op )
+		{
+			if( m_data.size() != o.m_data.size() )
+				throw std::runtime_error("Error: trying to combine incompatible multi_domain_data.");
+			
+			for( unsigned i=0; i<m_data.size(); ++i ) //<OtherDataType_,BinaryOperator>
+				m_data[i]->combine_with( *o.m_data[i], op );
+		}
+		
+		
+		//! bracket operator to access bundled multi-domain data
+		/*!
+		 * @param idomain the domain index (0 based)
+		 * @param it tree iterator pointing to the grid in the tree to be accessed
+		 * @param CellIndex index of the cell in the grid
+		 */
+		ValueType_& operator() ( unsigned idomain, const typename TreeType_::iterator& it, char CellIndex )
+		{
+			if( idomain >= m_data.size() ){
+				std::cerr << "out of range domain: " << idomain << " >= " << m_data.size() << std::endl;
+				idomain=0;
+			}
+			
+			return m_data[idomain]->cell_value(it,CellIndex);
+		}
+		
+		
+		//! bracket operator to access bundled multi-domain data (const)
+		/*!
+		 * @param idomain the domain index (0 based)
+		 * @param it tree const_iterator pointing to the grid in the tree to be accessed
+		 * @param CellIndex index of the cell in the grid
+		 */
+		ValueType_& operator() ( unsigned idomain, const typename TreeType_::const_iterator& it, char CellIndex )
+		{
+			if( idomain >= m_data.size() ){
+				std::cerr << "out of range domain: " << idomain << " >= " << m_data.size() << std::endl;
+				idomain=0;
+			}
+			return m_data[idomain]->cell_value(it,CellIndex);
+		}
+		
+		//! destructor for bundled multi-domain data
+		~multi_domain_data()
+		{
+			for( unsigned idom=0; idom<m_data.size(); ++idom )
+				delete m_data[idom];
+		}
+		
+		
+		//! bundled read function; reads in multi-domain data
+		void get_var( std::string var_name )
+		{
+			//m_data.clear();
+			if( m_ndomains != m_data.size() ){
+				std::cerr << "Error: Internal consistency check failed in multi_domain_data::get_var.";
+				return;
+			}
+			
+			for( unsigned idom=0; idom<m_ndomains; ++idom )
+			{
+				//m_data.push_back( new DataType_(*m_ptrees[idom]) );
+				m_data[idom]->read(var_name);
+			}
+		}
+
+		//! TBD
+		unsigned size( void ){ return m_data.size(); }
+		
+		//! TBD
+		unsigned size( unsigned idom ) { return m_data.at(idom)->size(); }
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+
+} // namespace HYDRO
+} // namespace RAMSES
+
+#endif //__RAMSES_HYDRO_DATA_HH
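For reference, this is roughly how the hydro interface above is meant to be
driven (a minimal sketch, not part of the patch; the snapshot path and the
"density" variable name are assumptions, and the RAMSES_tree /
RAMSES_hydro_data typedefs come from RAMSES_typedefs.h added further down in
this commit):

#include <iostream>
#include <iterator>
#include <string>
#include <vector>

#include "RAMSES_info.hh"
#include "RAMSES_typedefs.h"

int main()
{
    // hypothetical snapshot; the info_XXXXX.txt file drives everything else
    RAMSES::snapshot rsnap( "output_00001/info_00001.txt", RAMSES::version3 );

    // build and read the AMR tree of domain 1 over all refinement levels
    RAMSES::AMR::RAMSES_tree tree( rsnap, 1, rsnap.m_header.levelmax, 0 );
    tree.read();

    // attach a hydro data object and list the variables it knows about
    RAMSES::HYDRO::RAMSES_hydro_data hydro( tree );
    std::vector<std::string> names;
    hydro.get_var_names( std::back_inserter(names) );
    for( unsigned i=0; i<names.size(); ++i )
        std::cout << names[i] << std::endl;

    // load one variable for this domain into the internal arrays
    hydro.read( "density" );
    return 0;
}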

Added: trunk/yt/ramses_headers/RAMSES_info.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_info.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,334 @@
+/*
+		This file is part of libRAMSES++ 
+			a C++ library to access snapshot files 
+			generated by the simulation code RAMSES by R. Teyssier
+		
+    Copyright (C) 2008-09  Oliver Hahn, ojha at gmx.de
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __READ_INFO_HH
+#define __READ_INFO_HH
+
+#include <string>
+#include <sstream>
+#include <vector>
+#include <fstream>
+#include <stdexcept>
+#include <cmath>
+
+namespace RAMSES{
+
+#define btest(var,pos) ((var) & (1<<(pos)))
+
+//! the file format version of RAMSES
+enum codeversion{
+	version1, version2, version3
+};
+
+const int state_diagram[12][2][8] = 
+{ 
+	{ { 1, 2, 3, 2, 4, 5, 3, 5}, { 0, 1, 3, 2, 7, 6, 4, 5} },
+	{ { 2, 6, 0, 7, 8, 8, 0, 7}, { 0, 7, 1, 6, 3, 4, 2, 5} },
+	{ { 0, 9,10, 9, 1, 1,11,11}, { 0, 3, 7, 4, 1, 2, 6, 5} },
+	{ { 6, 0, 6,11, 9, 0, 9, 8}, { 2, 3, 1, 0, 5, 4, 6, 7} },
+	{ {11,11, 0, 7, 5, 9, 0, 7}, { 4, 3, 5, 2, 7, 0, 6, 1} },
+	{ { 4, 4, 8, 8, 0, 6,10, 6}, { 6, 5, 1, 2, 7, 4, 0, 3} },
+	{ { 5, 7, 5, 3, 1, 1,11,11}, { 4, 7, 3, 0, 5, 6, 2, 1} },
+  {	{ 6, 1, 6,10, 9, 4, 9,10}, { 6, 7, 5, 4, 1, 0, 2, 3} },
+  { {10, 3, 1, 1,10, 3, 5, 9}, { 2, 5, 3, 4, 1, 6, 0, 7} },
+  { {	4, 4, 8, 8, 2, 7, 2, 3}, { 2, 1, 5, 6, 3, 0, 4, 7} },
+  { {	7, 2,11, 2, 7, 5, 8, 5}, { 4, 5, 7, 6, 3, 2, 0, 1} },
+	{ {10, 3, 2, 6,10, 3, 4, 4}, { 6, 1, 7, 0, 5, 2, 4, 3} }
+};
+	
+/**************************************************************************************\
+\**************************************************************************************/
+	
+/*!
+ * @class RAMSES::snapshot
+ * @brief class providing access to general information about a RAMSES simulation snapshot
+ */
+class snapshot{
+
+protected:
+
+	//! c++ input file stream for stream access to the RAMSES info_XXXXX.txt file
+	std::ifstream m_ifs;
+	
+public:
+
+	//! path and name of the info_XXXXX.txt file of a RAMSES snapshot
+	std::string   m_filename;
+	
+	//! the RAMSES version, takes care of changes in file format
+	codeversion m_version;
+	
+	//! the simulation meta data stored in the info file
+	struct info_data{
+		unsigned ncpu;			//!< the number of CPUs (and thus computational domains) in this simulation
+		unsigned ndim;			//!< the number of spatial dimensions
+		unsigned levelmin;		//!< minimum refinement level (this refinement is thus present everywhere)
+		unsigned levelmax;		//!< maximum refinement level allowed
+		unsigned ngridmax;		//!< maximum number of grid cells stored per CPU
+		unsigned nstep_coarse;	//!< number of coarse time steps performed up to this snapshot
+		double boxlen;			//!< the length of the simulation box in internal units
+		double time;			//!< time stamp of the snapshot
+		double aexp;			//!< cosmological expansion factor of the current snapshot
+		double H0;				//!< value of the Hubble constant for this simulation
+		double omega_m;			//!< value of the total matter density parameter for this simulation
+		double omega_l;			//!< value of the cosmological constant density parameter for this simulation
+		double omega_k;			//!< value of the curvature density parameter for this simulation
+		double omega_b;			//!< value of the baryonic (i.e. collisional) matter density parameter for this simulation
+		double unit_l;			//!< internal length unit 
+		double unit_d;			//!< internal density unit
+		double unit_t;			//!< internal time unit
+	};
+	
+	//! header data containing the time stamp and parameters of the simulation snapshot
+	struct info_data m_header;
+	
+	//! minimum hilbert ordering indices for each domain
+	std::vector<double> ind_min;
+	
+	//! maximum hilbert ordering indices for each domain
+	std::vector<double> ind_max;
+	
+protected:
+	
+	//! tokenize a string 
+	/*!
+	 * @param str input string
+	 * @param tokens reference to an STL vector containing all substrings after return
+	 */
+	void tokenize( const std::string& str, std::vector<std::string>& tokens )
+	{
+		std::string buf;
+    	std::stringstream ss(str);
+		while( ss >> buf )
+			tokens.push_back( buf );
+	}
+	
+	//! templated type conversion for string to arbitrary types that can be read from STL stringstreams
+	/*!
+	 * @param str input string
+	 * @param ret reference to the template parameter type return value
+	 */
+	template <typename T>
+	inline void convert( const std::string& str, T& ret )
+	{
+		std::stringstream ss(str);
+		ss >> ret;
+	}
+	
+	//! reads the right-most substring of a line
+	/*!
+	 * @param rhs reference to return value
+	 */
+	template <typename T>
+	void read_line_rhs( T& rhs )
+	{
+		std::vector<std::string> split_buf;
+		std::string buf;
+		std::getline( m_ifs, buf );
+		
+		tokenize( buf, split_buf );
+		convert( split_buf[split_buf.size()-1], rhs );
+	}
+	
+	//! skip a line in the text input file
+	void skip_line( void )
+	{
+		std::string buf;
+		std::getline( m_ifs, buf );
+	}
+	
+	//! parse the RAMSES snapshot info file
+	void parse_file( void )
+	{
+		read_line_rhs( m_header.ncpu );
+		read_line_rhs( m_header.ndim );
+		read_line_rhs( m_header.levelmin );
+		read_line_rhs( m_header.levelmax );
+		read_line_rhs( m_header.ngridmax );
+		read_line_rhs( m_header.nstep_coarse );
+		skip_line();
+		read_line_rhs( m_header.boxlen );
+		read_line_rhs( m_header.time );
+		read_line_rhs( m_header.aexp );
+		read_line_rhs( m_header.H0 );
+		read_line_rhs( m_header.omega_m );
+		read_line_rhs( m_header.omega_l );
+		read_line_rhs( m_header.omega_k );
+		read_line_rhs( m_header.omega_b );
+		read_line_rhs( m_header.unit_l );
+		read_line_rhs( m_header.unit_d );
+		read_line_rhs( m_header.unit_t );
+		skip_line();
+		skip_line();
+		skip_line();
+		
+		for( unsigned i=0; i<m_header.ncpu; ++i ){
+			std::vector<std::string> split_buf;
+			std::string buf;
+			std::getline( m_ifs, buf );
+			tokenize( buf, split_buf );
+			
+			double imin, imax;
+			unsigned domain;
+			
+			convert( split_buf[0], domain );
+			
+			if( domain != i+1 )
+				throw std::runtime_error("RAMSES_snapshot::parse_file : corrupt info file\'"+m_filename+"\'.");
+			
+			convert( split_buf[1], imin );
+			convert( split_buf[2], imax );
+			
+			ind_min.push_back( imin );
+			ind_max.push_back( imax );
+			
+		}
+		
+		
+	}
+	
+	inline unsigned locate( const double x, const std::vector<double>& vx )
+	{
+		long unsigned ju,jm,jl;
+		bool ascnd=(vx[vx.size()-1]>=vx[0]);
+		jl = 0;
+		ju = vx.size()-1;
+		while( ju-jl > 1 ) {
+			jm = (ju+jl)>>1;
+			if( x >= vx[jm] == ascnd )
+				jl = jm;
+			else
+				ju = jm;
+		}
+		return std::max((long unsigned)0,std::min((long unsigned)(vx.size()-2),(long unsigned)jl));
+	}
+	
+public:
+	
+	//! constructor for a RAMSES snapshot meta data object
+	/*!
+	 * @param info_filename path and name of the info_XXXXX.txt file of the RAMSES simulation
+	 */
+	snapshot( std::string info_filename, codeversion ver=RAMSES::version1 )
+	 : m_ifs( info_filename.c_str(), std::ios::in ), m_filename( info_filename ),
+	   m_version( ver )
+	{
+		if(!m_ifs.good())
+			throw std::runtime_error("RAMSES_snapshot : cannot open file \'"+m_filename+"\' for read access.");
+		m_ifs.exceptions ( std::ifstream::eofbit | std::ifstream::failbit | std::ifstream::badbit );
+		
+		parse_file();
+	}
+	
+
+	unsigned get_snapshot_num( void )
+	{
+		unsigned ii = m_filename.rfind("info"), num;
+		std::string tmp( m_filename.substr(ii+5,5) );
+		sscanf(tmp.c_str(),"%d",&num);
+		return num;
+	}
+	
+	unsigned getdomain_bykey( double key )
+	{
+		unsigned i = locate( key, ind_min );
+		return i+1;
+	}
+
+
+};
+
+
+inline void hilbert3d( 
+		const std::vector<double>& x,
+		const std::vector<double>& y,
+		const std::vector<double>& z,
+		std::vector<double>& order,
+		unsigned bit_length )
+{
+	std::vector<bool> i_bit_mask(3*bit_length, false );
+	std::vector<bool> x_bit_mask(1*bit_length, false );
+	std::vector<bool> y_bit_mask(1*bit_length, false );
+	std::vector<bool> z_bit_mask(1*bit_length, false );
+	
+	unsigned npoints = x.size();
+	double lx = pow(2.0,bit_length);
+	
+	for( unsigned ip=0; ip<npoints; ++ip )
+	{
+		for( unsigned i=0; i<bit_length; ++i )
+		{
+			int xx = (int)(x[ip]*lx);
+			x_bit_mask[i] = btest(xx,i);
+			
+			xx = (int)(y[ip]*lx);
+			y_bit_mask[i] = btest(xx,i);
+			
+			xx = (int)(z[ip]*lx);
+			z_bit_mask[i] = btest(xx,i);
+		}
+		
+		for( unsigned i=0; i<bit_length; ++i )
+		{
+			i_bit_mask[3*i+2] = x_bit_mask[i];
+			i_bit_mask[3*i+1] = y_bit_mask[i];
+			i_bit_mask[3*i+0] = z_bit_mask[i];
+		}
+		
+		int nstate, cstate;
+		bool b0, b1, b2;
+		int sdigit, hdigit;
+		
+		cstate=0;
+		for( int i=bit_length-1; i>=0; --i )
+		{
+			b2 = i_bit_mask[3*i+2];
+			b1 = i_bit_mask[3*i+1];
+			b0 = i_bit_mask[3*i+0];
+			
+			sdigit = b2*4+b1*2+b0;
+			nstate = state_diagram[cstate][0][sdigit];
+			hdigit = state_diagram[cstate][1][sdigit];
+			
+			i_bit_mask[3*i+2] = btest(hdigit,2);
+			i_bit_mask[3*i+1] = btest(hdigit,1);
+			i_bit_mask[3*i+0] = btest(hdigit,0);
+			
+			cstate = nstate;
+		}
+		
+		order[ip] = 0.0;
+		for( unsigned i=0; i<3*bit_length; ++i )
+		{
+			b0 = i_bit_mask[i];
+			order[ip] += (double)b0*pow(2.0,i);
+		}
+		
+	}	
+}
+
+
+#undef btest
+
+}
+
+
+#endif
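A minimal sketch of the snapshot class and the hilbert3d() helper above (not
part of the patch; the path is hypothetical, and the bit_length passed to
hilbert3d is an assumption that has to match the one used when the info file
was written):

#include <iostream>
#include <vector>

#include "RAMSES_info.hh"

int main()
{
    RAMSES::snapshot rsnap( "output_00001/info_00001.txt", RAMSES::version3 );

    std::cout << "snapshot #" << rsnap.get_snapshot_num()
              << ", ncpu="    << rsnap.m_header.ncpu
              << ", boxlen="  << rsnap.m_header.boxlen
              << ", aexp="    << rsnap.m_header.aexp << std::endl;

    // compute the Hilbert ordering key of one point in [0,1)^3 ...
    std::vector<double> x(1, 0.5), y(1, 0.5), z(1, 0.5), order(1, 0.0);
    unsigned bit_length = rsnap.m_header.levelmax;   // assumption, see above
    RAMSES::hilbert3d( x, y, z, order, bit_length );

    // ... and ask which domain's [ind_min,ind_max] interval contains it
    std::cout << "point lies in domain "
              << rsnap.getdomain_bykey( order[0] ) << std::endl;
    return 0;
}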

Added: trunk/yt/ramses_headers/RAMSES_particle_data.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_particle_data.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,650 @@
+/*
+		This file is part of libRAMSES++ 
+			a C++ library to access snapshot files 
+			generated by the simulation code RAMSES by R. Teyssier
+		
+    Copyright (C) 2008-09  Oliver Hahn, ojha at gmx.de
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RAMSES_PARTICLE_DATA_HH
+#define __RAMSES_PARTICLE_DATA_HH
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <algorithm>
+#include <cmath>
+#include <map>
+
+#include "RAMSES_info.hh"
+#include "FortranUnformatted_IO.hh"
+#include "data_iterators.hh"
+
+namespace RAMSES{
+namespace PART{
+
+//! particle type identifiers returned by RAMSES_particle_data::ptype()
+enum ptype_bits {
+	RAMSES_DM_BIT     = 0,
+	RAMSES_STAR_BIT   = 1,
+	RAMSES_DEBRIS_BIT = 2
+};
+
+//! particle type identifiers, can be combined with logical or
+enum ptype { 
+	ptype_dm     = 1L << RAMSES_DM_BIT,			//!< selects dark matter particles
+	ptype_star   = 1L << RAMSES_STAR_BIT,		//!< selects star particles
+	ptype_debris = 1L << RAMSES_DEBRIS_BIT,		//!< selects debris particles
+};
+
+//... zero age of a particle is assumed if age is smaller than this number
+const double zero_age      = 1.0e-8;
+
+
+//... content of RAMSES particle data files is fixed; the available fields
+//... are listed in the following two arrays
+
+		
+//! possible variable names in RAMSES particle files in version 1 and 2 files
+const char ramses_particle_variables_v12[][64] = {
+	{"position_x"},
+	{"position_y"},
+	{"position_z"},
+	{"velocity_x"},
+	{"velocity_y"},
+	{"velocity_z"},
+	{"mass"},
+	{"age"},
+	{"metallicity"},
+	{"particle_ID"},
+	{"refinement_level"} };
+	
+//! possible variable names in RAMSES particle files in version 3 files
+const char ramses_particle_variables_v3[][64] = {
+	{"position_x"},
+	{"position_y"},
+	{"position_z"},
+	{"velocity_x"},
+	{"velocity_y"},
+	{"velocity_z"},
+	{"mass"},
+	{"particle_ID"},
+	{"refinement_level"},
+	{"age"},
+	{"metallicity"} };
+	
+
+/**************************************************************************************\
+\**************************************************************************************/
+	
+/*! 
+ * @class RAMSES::PART::data
+ * @brief encapsulates particle data from a RAMSES simulation snapshot
+ *
+ * This class provides low-level read access to RAMSES particle files.
+ * Data from a given computational domain can be read and is
+ * stored in internal data structures.
+ */
+class data {
+protected:
+	const RAMSES::snapshot *m_pRAMSESsnap;	//!< pointer to RAMSES snapshot meta-information
+	std::string m_fname;					//!< path/filename of the .info file
+	int m_cpu;								//!< ID of the computational domain from which data is to be read.
+	unsigned m_nvars;						//!< number of variables available in file
+	std::vector<unsigned> m_nvar_stride; 	//!< access offset to variable locations in file
+	std::vector<int> m_vardim;				//!< spatial dimension of a variable
+	std::vector<std::string> m_varnames;	//!< names of the variables
+
+	//! a hash table for variable name to internal variable index
+	std::map<std::string,unsigned> m_var_name_map;
+	
+	//! particle file header meta data
+	struct header{ 
+		std::vector< int > localseed;		//!< local seed used to generate star particles
+		int ncpu;							//!< number of CPUs in the simulation
+		int ndim;							//!< number of spatial dimensions
+		int nstar_tot;						//!< total number of star particles
+		int nsink;							//!< number of sink particles
+		int npart;							//!< number of particles
+		int npart_dm;						//!< number of dark matter particles
+		int npart_star; 					//!< number of star particles
+		int npart_debris;					//!< number of debris particles
+		double mstar_tot;					//!< total stellar mass
+		double mstar_lost;					//!< stellar mass lost
+	};
+	
+	struct header m_header;					//!< particle file header data
+	
+	
+	//! generate the part_XXXXX.oYYYYY filename for a given CPU ID
+	std::string gen_fname( int icpu );
+	
+	//! generate the part_XXXXX.oYYYYY filename from the info_XXXXX.oYYYYY filename.
+	std::string rename_info2part( const std::string& info );
+	
+	//! Read header from particle files and determine total number of particles.
+	void read_header( void );
+	
+public:
+	
+	//! constructor for a RAMSES particle data interface
+	/*! The constructor checks the validity of the requested computational
+	 *  domains and reads the particle header data to determine the total
+	 *  number of particles.
+   	 * @param snap reference to the underlying snapshot object
+	 * @param cpu the domain number
+     */
+	data( const RAMSES::snapshot& snap, int cpu )
+		: m_pRAMSESsnap( &snap ), 
+		  m_fname( rename_info2part(snap.m_filename) ), 
+		  m_cpu( cpu )
+    { 
+		m_header.npart_dm     = 0;
+		m_header.npart_star   = 0;
+		m_header.npart_debris = 0;
+
+		read_header();
+		
+		//... compute offsets of variable arrays in file and access hash map ...//
+		m_nvar_stride.push_back(0);
+		
+		
+		switch( m_pRAMSESsnap->m_version )
+		{
+			case RAMSES::version1:
+			case RAMSES::version2:
+		
+				if( m_header.nstar_tot > 0 )
+				{
+					//... file contains star particles ...//
+					m_nvars = 11;
+					for( unsigned i=0; i<m_nvars; ++i ){
+						m_nvar_stride.push_back(m_nvar_stride[i]+1);
+						m_var_name_map.insert( std::pair<std::string,unsigned>( ramses_particle_variables_v12[i], i ) );
+						m_varnames.push_back( ramses_particle_variables_v12[i] );
+					}
+				}
+				else
+				{
+					//... file contains only DM particles, so some fields are not available
+					m_nvars = 9;
+					for( unsigned i=0,j=0; i<m_nvars; ++i,++j ){
+						if( i==7 ) j=9;
+						m_nvar_stride.push_back(m_nvar_stride[i]+1);
+						m_var_name_map.insert( std::pair<std::string,unsigned>( ramses_particle_variables_v12[j], i ) );
+						m_varnames.push_back( ramses_particle_variables_v12[j] );
+					}
+				}
+				
+				break;
+				
+			case RAMSES::version3:
+			
+				if( m_header.nstar_tot > 0 )
+					//... file contains star particles ...//
+					m_nvars = 11;
+				else
+					//... file contains only DM particles, so the last fields are not available
+					m_nvars = 9;
+
+				for( unsigned i=0; i<m_nvars; ++i )
+				{
+					m_nvar_stride.push_back(m_nvar_stride[i]+1);
+					m_var_name_map.insert( std::pair<std::string,unsigned>( ramses_particle_variables_v3[i], i ) );
+					m_varnames.push_back( ramses_particle_variables_v3[i] );
+				}
+			
+				break;
+				
+			
+			default:
+				throw std::runtime_error("RAMSES::PART::data: cannot handle data for this RAMSES version.");
+		}
+		
+		//... check if supplied range of domains is valid ...//
+		if( m_cpu < 1 || m_cpu > m_header.ncpu )
+			throw std::runtime_error("RAMSES::PART::data: attempt to read from out of range CPU.");
+	}
+	
+	
+	//=== implementation of an interface compatible with the multi-variable data source skeleton ===//
+	//=== see file README.devel, section 1, for details on the interface skeleton               ===//
+	
+	//! retrieve the names of variables available in the particle data source
+	/*! Variables have an internal index but are accessed through their name, given as a string.
+	 *  Particle data files may contain different variables depending on the kind of simulation run.
+	 * @param names An output iterator to which std::string designating available variables are sent
+	 * @return Final position of the output iterator
+	 */
+	template< typename _OutputIterator >
+	_OutputIterator get_var_names( _OutputIterator names )
+	{
+		std::vector<std::string>::iterator it( m_varnames.begin() );
+		while( it != m_varnames.end() ){
+			*names = *it;
+			++it; ++names;
+		}
+		return names;
+	}
+	
+protected:
+
+	//! return the internal index for a variable of specified name
+	/*! throws a runtime_error exception if variable name does not exist
+	 * @param varname string designating the variable
+	 * @return internal index of variable
+	 */
+	int get_var_idx( const std::string& varname )
+	{
+		int ivar;
+		std::map<std::string,unsigned>::iterator mit;
+		if( (mit=m_var_name_map.find(varname)) != m_var_name_map.end() )
+			ivar = (*mit).second;
+		else
+			throw std::runtime_error("RAMSES::PART::data::get_var_idx :"\
+				" Error, cannot find variable named \'"+varname+"\'");
+		return ivar;
+	}
+	
+	
+	//! get spatial dimensionality of a variable
+	int get_var_dim( int ivar )
+	{	return m_vardim[ivar]; }
+	
+	
+public:
+	
+	//! get spatial dimensionality of a variable
+	int get_var_dim( const std::string& varname )
+	{ return m_vardim[get_var_idx(varname)]; }
+	
+	
+	//! retrieve data of specified variable from data source using a mask access pattern
+	/*! The data for the specified variable, fixing a spatial dimension, is sent to an
+	 *  output iterator. For each read operation, the mask iterator is checked
+	 *  and the data is discarded if it points to 'false'
+	 * @param varname name of the variable to be retrieved
+	 * @param mask input iterator supplying the mask access pattern
+	 * @param val output iterator to which the retrieved data is sent
+	 * @return final position of the mask iterator
+	 */
+	template< typename _Basetype, typename _InputIterator, typename _OutputIterator >
+	_InputIterator get_var( const std::string& varname, _InputIterator mask, _OutputIterator val )
+	{ 
+		unsigned ivar = get_var_idx( varname );
+		FortranUnformatted ff( gen_fname(m_cpu) );
+		//.. skip header and particle position entries ..//
+		ff.skip_n_from_start( 8+m_nvar_stride[ivar] );
+		mask=ff.read<_Basetype, _InputIterator, _OutputIterator>(mask,val);
+
+		return mask;
+	}
+	
+	
+	//! retrieve data of specified variable from data source
+	/*! The data for the specified variable, fixing a spatial dimension, is sent to an
+	 *  output iterator. 
+	 * @param varname name of the variable to be retrieved
+	 * @param val output iterator to which the retrieved data is sent
+	 * @return final position of the output iterator
+	 */
+	template< typename _Basetype, typename _OutputIterator >
+	_OutputIterator get_var( const std::string& varname, _OutputIterator val )
+	{ 
+		unsigned ivar = get_var_idx( varname );
+		FortranUnformatted ff( gen_fname(m_cpu) );
+		//.. skip header and particle position entries ..//
+		ff.skip_n_from_start( 8+m_nvar_stride[ivar] );
+		ff.read<_Basetype, _OutputIterator>(val);
+		
+		return val;
+	}
+	
+	
+	
+	//=== the following member functions are simply copied from the skeleton ===//
+	
+		
+	//! generate a bit mask for the specified particle type
+	/*! Writes true/false to the output mask iterator for each particle, depending
+	 *  on whether it matches the requested type.  The type is determined from the
+	 *  age and ID of the particle: dark matter particles have age=0, star
+	 *  particles have age>0 and ID>0, and debris type (SN remnant) particles
+	 *  have age>0 and ID=0.
+	 * @param ptype particle type, i.e. ptype_dm, ptype_star or ptype_debris
+	 * @param age_first
+	 * @param age_last
+	 * @param ids_first
+	 * @param mask
+	 * @return particle mask iterator
+	 */
+	template<typename _InputIterator1, typename _InputIterator2, typename _OutputIterator>
+	_OutputIterator mask_particle_type( int ptype, 
+										const _InputIterator1& age_first, const _InputIterator1& age_last,
+										const _InputIterator2& ids_first, _OutputIterator mask )
+	{
+		_InputIterator1 age_it=age_first;
+		_InputIterator2 ids_it=ids_first;
+	
+		switch( ptype )
+		{
+			case ptype_dm:	
+				while( age_it != age_last )
+				{
+					if( fabs(*age_it) <= zero_age )
+						*mask = true;
+					else
+						*mask = false;
+					++mask;	++age_it;
+				}
+				break;
+			
+			case ptype_star:	
+				while( age_it != age_last )
+				{
+					if( fabs(*age_it) > zero_age && (*ids_it) > 0)
+						*mask = true;
+					else
+						*mask = false;
+					++mask;	++age_it;	++ids_it;
+				}
+				break;
+								
+			case ptype_debris:	
+				while( age_it != age_last )
+				{
+					if( fabs(*age_it) > zero_age && (*ids_it) == 0)
+						*mask = true;
+					else
+						*mask = false;
+					++mask;	++age_it;	++ids_it;
+				}
+				break;
+								
+			default: throw std::runtime_error("get_particle_type_mask: Error, invalid particle type.");
+		}
+		
+		return mask;
+	}
+	
+	
+	
+	
+	//! generate bit mask for a specified bounding region
+	/*! Checks whether data points lie within a given region. The _BoundingRegion class object
+	 *  must return true or false for a call of 'region(x,y,z)' if the point (x,y,z)
+	 *  does or does not lie in the desired region.
+	 * @param region A region object used for querying whether points lie in a desired subspace.
+	 * @param x_first Input iterator pointing on first x-coordinate
+	 * @param x_last Input iterator pointing behind last x-coordinate
+	 * @param y_first Input iterator pointing on first y-coordinate
+	 * @param z_first Input iterator pointing on first z-coordinate
+	 * @param mask Output iterator to which true/false values are written
+	 * @return Final state of mask iterator
+	 */
+	template<typename _InputIterator, typename _BoundingRegion, typename _OutputIterator>
+	_OutputIterator mask_region( _BoundingRegion region,
+								 const _InputIterator& x_first, const _InputIterator& x_last,
+								 const _InputIterator& y_first, const _InputIterator& z_first,
+								 _OutputIterator mask )
+	{		
+		_InputIterator xit=x_first, yit=y_first, zit=z_first;
+		
+		while( xit != x_last )
+		{
+			*mask = region( *xit, *yit, *zit );
+			++mask; ++xit; ++yit; ++zit;
+		}
+		
+		return mask;
+	}
+	
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+#define R_CHECK_BIT(var,pos) ((var) & (1<<(pos)))
+
+template< typename Real_, typename IDType_ >
+inline bool is_of_type( Real_ age, IDType_ id, int ptype )
+{
+	//if( ptype==ptype_star )
+	if( R_CHECK_BIT( ptype, RAMSES_STAR_BIT ) )
+		if( fabs(age)>zero_age && id > 0 )
+			return true;
+		//else return false;
+
+	//if( ptype==ptype_dm )
+	if( R_CHECK_BIT( ptype, RAMSES_DM_BIT ) )
+		if( fabs(age)<=zero_age )
+			return true;
+		//else return false;
+			
+	//if( ptype==ptype_debris )
+	if( R_CHECK_BIT( ptype, RAMSES_DEBRIS_BIT ) )
+		if( fabs(age) > zero_age && id == 0 )
+			return true;
+	return false;
+}
+
+#undef R_CHECK_BIT
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+inline std::string data::gen_fname( int icpu )
+{
+	std::string fname;
+	char ext[32];
+	fname = m_fname;
+	fname.erase(fname.rfind('.')+1);
+	sprintf(ext,"out%05d",icpu);
+	fname.append(std::string(ext));
+	return fname;
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+inline void data::read_header( void )
+{
+	FortranUnformatted ff( gen_fname(m_cpu) );	
+	
+	//-- read header data --//
+	ff.read( m_header.ncpu );
+	ff.read( m_header.ndim );
+	ff.read( m_header.npart );
+	ff.read<int>( std::back_inserter(m_header.localseed) );
+	ff.read( m_header.nstar_tot );
+	ff.read( m_header.mstar_tot );
+	ff.read( m_header.mstar_lost );
+	ff.read( m_header.nsink );
+	
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+inline std::string data::rename_info2part( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("info");
+	amr = info.substr(0,ii)+"part" + info.substr(ii+4, 6) + ".out00001";
+	return amr;
+}
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+/*! 
+ * @class RAMSES::PART::multi_domain_data
+ * @brief encapsulates particle data from multiple computational domains
+ *
+ * This class provides high-level data access to bundled domain data.  Bundling
+ * of domain data is useful when analysis of snapshots is performed in parallel
+ * but on a number of cores different from that used for the RAMSES simulation.
+ */
+template< typename TreeType_, typename ValueType_=double >
+class multi_domain_data
+{
+	protected:
+		unsigned m_ndomains;							//!< number of domains bundled
+		RAMSES::snapshot &m_rsnap;						//!< reference to the underlying snapshot object
+		std::vector< std::vector<ValueType_> > m_data;	//!< vector of bundled data objects
+		std::vector<TreeType_*> m_ptrees;				//!< vector of bundled tree objects
+		
+	public:
+	
+		//! constructor for bundled multi-domain particle data
+		/*!
+		 * @param rsnap reference to the underlying snapshot object
+		 * @param ptrees vector of trees to be bundled
+		 */
+		multi_domain_data( RAMSES::snapshot& rsnap, std::vector<TreeType_*> ptrees )
+		: m_ndomains( ptrees.size() ), m_rsnap(rsnap), m_ptrees( ptrees )
+		{ }
+		
+		
+		//! access a particle in a specific domain
+		/*!
+		 * @param idomain the domain ID
+		 * @param ind the particle index
+		 */
+		ValueType_& operator() ( unsigned idomain, unsigned ind )
+		{
+			return m_data[idomain][ind];
+		}
+		
+		//! access the vector of particle of a specific domain
+		/*!
+		 * @param idomain the domain ID
+		 */
+		std::vector<ValueType_>& operator() ( unsigned idomain )
+		{
+			return m_data[idomain];
+		}
+		
+		//! bundled read function; reads in multi-domain data
+		void get_var( std::string var_name )
+		{
+			m_data.clear();
+			for( unsigned idom=0; idom<m_ndomains; ++idom )
+			{
+				data local_data( m_rsnap, m_ptrees[idom]->m_cpu );
+				m_data.push_back( std::vector<ValueType_>() );
+				local_data.get_var<ValueType_>(var_name, std::back_inserter(m_data.back()) );
+			}
+		}
+		
+		//! get the pointer to the vector of particles of a specific domain
+		std::vector<ValueType_>* get_vpointer( unsigned idomain )
+		{
+			return &m_data[idomain];
+		}
+	
+		//! TBD
+		unsigned size( void ){ return m_data.size(); }
+		
+		//! TBD
+		unsigned size( unsigned idom ) { return m_data.at(idom).size(); }
+};
+
+
+}// namespace PART
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+namespace GEOM{
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+//! TBD
+class bounding_sphere{
+	protected:
+		double 
+			xc,	//!< TBD
+			yc,	//!< TBD
+			zc,	//!< TBD
+			r,	//!< TBD
+			r2;	//!< TBD
+			
+	public:
+		//! TBD
+		bounding_sphere( double _xc, double _yc, double _zc, double _r )
+		: xc(_xc), yc(_yc), zc(_zc), r(_r), r2(r*r)
+		{ }
+		
+		//! TBD
+		inline bool operator()( double x, double y, double z )
+		{
+			double dx(x-xc),dy(y-yc),dz(z-zc);
+			return dx*dx+dy*dy+dz*dz < r2;
+		}
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+//! TBD
+class bounding_box{
+	protected:
+		double 
+			xc,		//!< TBD
+			yc,		//!< TBD
+			zc,		//!< TBD
+			Lx,		//!< TBD
+			Ly,		//!< TBD
+			Lz,		//!< TBD
+			Lx2,	//!< TBD
+			Ly2,	//!< TBD
+			Lz2;	//!< TBD
+	public:
+		//! TBD
+		bounding_box( double _xc, double _yc, double _zc, double _Lx, double _Ly, double _Lz )
+		: xc(_xc), yc(_yc), zc(_zc), Lx(_Lx), Ly(_Ly), Lz(_Lz), Lx2(0.5*Lx), Ly2(0.5*Ly), Lz2(0.5*Lz)
+		{ }
+		
+		//! TBD
+		inline bool operator()( double x, double y, double z )
+		{
+			double dx(x-xc),dy(y-yc),dz(z-zc);
+			return fabs(dx)<Lx2 && fabs(dy)<Ly2 && fabs(dz)<Lz2; 
+		}
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+//! TBD
+class bounding_cube:public bounding_box{
+	public:
+		//! TBD
+		bounding_cube( double _xc, double _yc, double _zc, double _L )
+		: bounding_box( _xc, _yc, _zc, _L, _L, _L )
+		{ }
+};
+
+/**************************************************************************************\
+\**************************************************************************************/
+
+}// namespace GEOM
+
+}// namespace RAMSES
+
+#endif //__RAMSES_PARTICLE_DATA
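A minimal sketch of the particle interface above (not part of the patch; the
path and the sphere parameters are illustrative, and positions are assumed to
be in box units so that [0,1] covers the domain):

#include <iostream>
#include <iterator>
#include <vector>

#include "RAMSES_info.hh"
#include "RAMSES_particle_data.hh"

int main()
{
    RAMSES::snapshot rsnap( "output_00001/info_00001.txt", RAMSES::version3 );
    RAMSES::PART::data part( rsnap, 1 );          // particle data of domain 1

    // pull the coordinate and mass arrays into std::vectors
    std::vector<double> x, y, z, m;
    part.get_var<double>( "position_x", std::back_inserter(x) );
    part.get_var<double>( "position_y", std::back_inserter(y) );
    part.get_var<double>( "position_z", std::back_inserter(z) );
    part.get_var<double>( "mass",       std::back_inserter(m) );

    // true/false mask for particles inside a sphere of radius 0.1
    // centred on the box centre
    RAMSES::GEOM::bounding_sphere sphere( 0.5, 0.5, 0.5, 0.1 );
    std::vector<bool> in_sphere;
    part.mask_region( sphere, x.begin(), x.end(), y.begin(), z.begin(),
                      std::back_inserter(in_sphere) );

    double mtot = 0.0;
    for( unsigned i=0; i<m.size(); ++i )
        if( in_sphere[i] ) mtot += m[i];
    std::cout << "mass inside sphere: " << mtot << std::endl;
    return 0;
}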

Added: trunk/yt/ramses_headers/RAMSES_poisson_data.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_poisson_data.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,249 @@
+/*
+		This file is part of libRAMSES++
+			a C++ library to access snapshot files
+			generated by the simulation code RAMSES by R. Teyssier
+
+    Copyright (C) 2008-09  Oliver Hahn, ojha at gmx.de
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef __RAMSES_POISSON_DATA_HH
+#define __RAMSES_POISSON_DATA_HH
+
+#include <fstream>
+#include <iostream>
+#include <iomanip>
+#include <vector>
+#include <cmath>
+
+#include "FortranUnformatted_IO.hh"
+#include "RAMSES_info.hh"
+#include "RAMSES_amr_data.hh"
+
+namespace RAMSES{
+namespace POISSON{
+
+
+/*!
+ * @class data
+ * @brief encapsulates poisson solver (gravity) data from a RAMSES simulation snapshot
+ *
+ * This class provides low-level read access to RAMSES poisson_XXXXX.out files.
+ * Data from a given computational domain can be read and is
+ * stored in internal data structures.
+ * Access to cell position and threaded tree structure of the cell is provided
+ * through the member functions of class RAMSES_amr_level.
+ * @sa RAMSES_amr_level
+ */
+template< typename TreeType_, typename Real_=double >
+class data : public RAMSES::HYDRO::proto_data<TreeType_,Real_>{
+
+public:
+
+	//! the poisson file header structure
+	struct header{
+		unsigned ncpu;		//!< number of CPUs in simulation
+		unsigned ndim;		//!< number of spatial dimensions
+		unsigned nlevelmax;	//!< maximum allowed refinement level
+		unsigned nboundary;	//!< number of boundary regions
+	};
+
+
+	std::string 	m_fname;		//!< the file name	
+	struct header	m_header;	 	//!< header meta data
+
+protected:
+	//! generates a poisson_XXXX filename for specified cpu
+	std::string gen_fname( int icpu );
+
+	//! generate poisson_XXXXX filename from info filename
+	std::string rename_info2poisson( const std::string& info );
+
+	//! generate poisson_XXXXX filename from amr filename
+	std::string rename_amr2poisson( const std::string& info );
+
+	//! read header data containing meta information
+	void read_header( void );
+
+	//! perform read operation of poisson variables (internal use)
+	/*!
+	 *
+	 */
+	void read( void );
+
+public:
+
+	//! constructor for poisson solver gravity data
+	/*!
+	 * @param AMRtree underlying AMR hierarchical tree data structure
+	 */
+	explicit data( TreeType_& AMRtree )
+	: proto_data<TreeType_,Real_>( AMRtree ),
+	  m_fname( rename_amr2poisson(AMRtree.m_fname) )
+	{
+		read_header();
+
+		if( this->m_cpu > m_header.ncpu || this->m_cpu < 1 )
+			throw std::runtime_error("RAMSES::POISSON::data : attempt to read from out of range CPU.");
+
+		if( this->m_minlevel < 0 || this->m_maxlevel >= m_header.nlevelmax )
+			throw std::runtime_error("RAMSES::POISSON::data : requested level is invalid.");
+	}
+	
+	//! access the value of the cells associated with the oct designated by the iterator
+	/*!
+	 * @param it the grid iterator pointing to the current oct
+	 * @param ind index of the child cell of the current oct (0..7)
+	 * @param idir the cartesian direction of the force vector (0..2)
+	 */
+	inline Real_& cell_value( const typename TreeType_::iterator& it, int ind, int idir )
+	{
+		unsigned ipos   = it.get_absolute_position();
+		unsigned ilevel = it.get_level();//-m_minlevel;
+		return (this->m_var_array[ilevel])[3*(this->m_twotondim*ipos+ind)+idir];
+	}
+	
+	//! access the value of the cells associated with the oct designated by the iterator
+	/*!
+	 * a call to this function will always fail since the poisson files contain only forces so far
+	 * @param it the grid iterator pointing to the current oct
+	 * @param ind index of the child cell of the current oct (0..7)
+	 */
+	inline Real_& cell_value( const typename TreeType_::iterator& it, int ind )
+	{
+		throw std::runtime_error("You should not call this two variable cell_value(.,.) function for forces!");
+	}
+
+	//! access the value of the cells associated with the oct designated by the iterator
+	/*!
+	 * @param it the grid iterator pointing to the current oct
+	 * @param ind index of the child cell of the current oct (0..7)
+	 * @param idir the cartesian direction of the force vector (0..2)
+	 */
+	inline Real_& operator()( const typename TreeType_::iterator& it, int ind, int idir )
+	{	return cell_value(it,ind,idir); }
+
+};
+
+
+template< typename TreeType_, typename Real_ >
+void data<TreeType_,Real_>::read_header( void )
+{
+	FortranUnformatted ff( gen_fname( this->m_cpu ) );
+	
+	//-- read header data --//
+	ff.read( m_header.ncpu );
+	ff.read( m_header.ndim );
+	ff.read( m_header.nlevelmax );
+	ff.read( m_header.nboundary );
+}
+
+
+template< typename TreeType_, typename Real_ >
+void data<TreeType_,Real_>::read( void )
+{
+	this->m_var_array.clear();
+	//int twotondim = (int)(pow(2,m_header.ndim)+0.5);
+
+	FortranUnformatted ff( gen_fname( this->m_cpu ) );
+
+	//.. skip header entries ..//
+	ff.skip_n_from_start( 4 ); //.. skip header
+
+	this->m_var_array.clear();
+
+	for( unsigned ilvl = 0; ilvl<=this->m_maxlevel; ++ilvl ){
+
+		this->m_var_array.push_back( std::vector<Real_>() );
+
+		for( unsigned icpu = 0; icpu<m_header.ncpu+m_header.nboundary; ++icpu ){
+
+				unsigned file_ilevel, file_ncache;
+				ff.read(file_ilevel);
+				ff.read(file_ncache);
+
+				if( file_ncache == 0 )
+						continue;
+
+			if( ilvl >= this->m_minlevel ){
+				if( file_ilevel != ilvl+1 )
+					throw std::runtime_error("RAMSES::POISSON::data::read : corrupted file " \
+						 "or file seek failure in file \'"+m_fname+"\'.");
+
+
+				std::vector<float> tmpx,tmpy,tmpz;
+				for( unsigned i=0; i<this->m_twotondim; ++i )
+				{
+					ff.read<double>( std::back_inserter(tmpx) );	//.. x-force
+					ff.read<double>( std::back_inserter(tmpy) );	//.. y-force
+					ff.read<double>( std::back_inserter(tmpz) );	//.. z-force
+				}
+				//.. reorder array to increase data locality..//
+				this->m_var_array.reserve( 3*tmpx.size() );
+				for( unsigned i=0; i<file_ncache; ++i ){
+					for( unsigned j=0; j<this->m_twotondim; ++j ){
+						//... interleave directional data ...//
+						(this->m_var_array.back()).push_back( tmpx[i+j*file_ncache] );
+						(this->m_var_array.back()).push_back( tmpy[i+j*file_ncache] );
+						(this->m_var_array.back()).push_back( tmpz[i+j*file_ncache] );
+					}
+				}
+			}else{
+				ff.skip_n( this->m_twotondim*3 );
+			}
+		}
+	}
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::gen_fname( int icpu )
+{
+	std::string fname;
+	char ext[32];
+	fname = m_fname;
+	fname.erase(fname.rfind('.')+1);
+	sprintf(ext,"out%05d",icpu);
+	fname.append(std::string(ext));
+	return fname;
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::rename_info2poisson( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("info");
+	amr = info.substr(0,ii)+"poisson" + info.substr(ii+4, 6) + ".out00001";
+	return amr;
+}
+
+
+template< typename TreeType_, typename Real_ >
+std::string data<TreeType_,Real_>::rename_amr2poisson( const std::string& info )
+{
+	std::string amr;
+	unsigned ii = info.rfind("amr");
+	amr = info.substr(0,ii)+"poisson" + info.substr(ii+3, 6) + ".out00001";
+	return amr;
+}
+
+
+} // namespace POISSON
+} // namespace RAMSES
+
+
+#endif // .. __RAMSES_POISSON_DATA_HH
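The poisson files store the three force components interleaved cell by cell;
a small standalone sketch (not part of the patch, independent of the library)
of how the index used in cell_value() recovers a component from the flat
per-level array filled by read():

#include <cassert>
#include <vector>

int main()
{
    const unsigned twotondim = 8;   // 2^3 child cells per oct in 3D
    const unsigned nocts     = 2;   // e.g. two octs on this level

    // fill a flat array the same way read() does: oct, then child cell,
    // then the x/y/z force components
    std::vector<double> level_data;
    for( unsigned ipos=0; ipos<nocts; ++ipos )
        for( unsigned ind=0; ind<twotondim; ++ind )
            for( unsigned idir=0; idir<3; ++idir )
                level_data.push_back( 100.0*ipos + 10.0*ind + idir );

    // cell_value()-style lookup: oct 1, child cell 3, y-component (idir=1)
    unsigned ipos = 1, ind = 3, idir = 1;
    double f = level_data[ 3*(twotondim*ipos + ind) + idir ];
    assert( f == 100.0*ipos + 10.0*ind + idir );
    return 0;
}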

Added: trunk/yt/ramses_headers/RAMSES_typedefs.h
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/RAMSES_typedefs.h	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,21 @@
+#include <string>
+#include <vector>
+#include <cstdio>
+#include <iostream>
+#include <fstream>
+
+#include "RAMSES_amr_data.hh"
+#include "RAMSES_hydro_data.hh"
+
+//... define the RAMSES base cell type to be of cell_locally_essential type
+//... - this type allows moving between refinement levels
+namespace RAMSES {
+  namespace AMR{
+    typedef RAMSES::AMR::cell_locally_essential<unsigned, float> RAMSES_cell;
+    typedef RAMSES::AMR::level<RAMSES_cell> RAMSES_level;
+    typedef RAMSES::AMR::tree< RAMSES_cell, RAMSES::AMR::level< RAMSES_cell > > RAMSES_tree;
+  }
+  namespace HYDRO {
+    typedef RAMSES::HYDRO::data< RAMSES::AMR::RAMSES_tree > RAMSES_hydro_data;
+  }
+}
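A minimal sketch of walking the AMR hierarchy with these unrolled types (not
part of the patch; path, level choice and the "density" variable are
assumptions, and the comment marks where the meaning of is_finest() is
assumed):

#include <iostream>

#include "RAMSES_info.hh"
#include "RAMSES_typedefs.h"

int main()
{
    RAMSES::snapshot rsnap( "output_00001/info_00001.txt", RAMSES::version3 );

    RAMSES::AMR::RAMSES_tree tree( rsnap, 1, rsnap.m_header.levelmax, 0 );
    tree.read();

    RAMSES::HYDRO::RAMSES_hydro_data hydro( tree );
    hydro.read( "density" );

    // loop over the octs of the coarsest level and print cell positions
    // together with the hydro value stored for each of their 8 child cells
    unsigned ilevel = rsnap.m_header.levelmin;
    for( RAMSES::AMR::RAMSES_tree::iterator it = tree.begin( ilevel );
         it != tree.end( ilevel ); it.next() )
    {
        for( unsigned ind=0; ind<8; ++ind )
        {
            // assumed: is_finest() flags leaf (unrefined) child cells
            if( !it.is_finest( ind ) ) continue;
            std::cout << tree.cell_pos<double>( it, ind ).x << " "
                      << tree.cell_pos<double>( it, ind ).y << " "
                      << tree.cell_pos<double>( it, ind ).z << " "
                      << hydro.cell_value( it, ind ) << std::endl;
        }
    }
    return 0;
}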

Added: trunk/yt/ramses_headers/README
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/README	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,9 @@
+These are header files written by Oliver Hahn for reading RAMSES data.  They do
+not need to be installed, as they contain the entire RamsesRead++ library.
+
+The library may also be downloaded independently from:
+
+  http://www.exp-astro.phys.ethz.ch/hahn/RamsesTools/
+
+Author: Oliver Hahn <ohahn at stanford.edu>
+Minor Modifications: Matthew Turk <matthewturk at gmail.com>

Added: trunk/yt/ramses_headers/data_iterators.hh
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_headers/data_iterators.hh	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,74 @@
+#ifndef __DATA_ITERATORS_HH
+#define __DATA_ITERATORS_HH
+
+#include <iterator>
+/*template<typename _Container,typename _InputIterator>
+class conditional_iterator
+: public _Container::iterator
+{
+	
+	
+};*/
+
+//! TBD
+template<typename _Container,typename _InputIterator>
+class conditional_back_insert_iterator
+: public std::iterator<std::output_iterator_tag, void, void, void, void>
+{
+	protected:
+		_Container* container;			//!< TBD
+		_InputIterator cond_iterator;	//!< TBD
+	
+	public:
+		//! TBD
+		typedef _Container		container_type;
+		
+		//! TBD
+		conditional_back_insert_iterator(_Container& __x,const _InputIterator& __i, int __prealloc=0) 
+		:	container(&__x), cond_iterator(__i) 
+		{ 
+			if( __prealloc > 0 )
+				__x.reserve( __x.size()+__prealloc);
+		}
+		
+		/*conditional_back_insert_iterator( const conditional_back_insert_iterator& __it )
+		: container( __it.container ), cond_iterator( __it.cond_iterator )
+		{ }*/
+		
+		//! TBD
+		conditional_back_insert_iterator&
+		operator=( typename _Container::const_reference __value )
+		{
+			if( *cond_iterator )
+				container->push_back(__value);
+			++cond_iterator;
+			return *this;
+		}
+		
+		//! TBD
+		conditional_back_insert_iterator&
+		operator*()
+		{ return *this; }
+		
+		//! TBD
+		conditional_back_insert_iterator&
+		operator++()
+		{ return *this; }
+		
+		//! TBD
+		conditional_back_insert_iterator&
+		operator++(int)
+		{ return *this; }
+};
+
+
+//! TBD
+template<typename _Container, typename _InputIterator>
+inline conditional_back_insert_iterator<_Container, _InputIterator>
+conditional_back_inserter(_Container& __x, const _InputIterator& __i, int __prealloc=0)
+{ return conditional_back_insert_iterator<_Container,_InputIterator>(__x,__i,__prealloc); }
+
+
+
+
+#endif
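A minimal sketch of the conditional back-inserter in isolation (not part of
the patch): values are pushed into the container only where the parallel mask
iterator reads true, which is how the particle reader combines get_var() with
a region or type mask:

#include <algorithm>
#include <cassert>
#include <vector>

#include "data_iterators.hh"

int main()
{
    std::vector<double> values;
    values.push_back( 1.0 ); values.push_back( 2.0 );
    values.push_back( 3.0 ); values.push_back( 4.0 );

    std::vector<bool> mask;
    mask.push_back( true );  mask.push_back( false );
    mask.push_back( true );  mask.push_back( false );

    // copy only the masked-in values into 'selected'
    std::vector<double> selected;
    std::copy( values.begin(), values.end(),
               conditional_back_inserter( selected, mask.begin() ) );

    assert( selected.size() == 2 );
    assert( selected[0] == 1.0 && selected[1] == 3.0 );
    return 0;
}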

Added: trunk/yt/ramses_reader.pyx
==============================================================================
--- (empty file)
+++ trunk/yt/ramses_reader.pyx	Tue Aug 10 16:02:10 2010
@@ -0,0 +1,804 @@
+"""
+Wrapping code for Oliver Hahn's RamsesRead++
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: UCSD
+Author: Oliver Hahn <ohahn at stanford.edu>
+Affiliation: KIPAC / Stanford
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+# Cython wrapping code for Oliver Hahn's RAMSES reader
+from cython.operator cimport dereference as deref, preincrement as inc
+from libc.stdlib cimport malloc, free, abs, calloc, labs
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+cdef inline np.int64_t i64max(np.int64_t i0, np.int64_t i1):
+    if i0 > i1: return i0
+    return i1
+
+cdef inline np.int64_t i64min(np.int64_t i0, np.int64_t i1):
+    if i0 < i1: return i0
+    return i1
+
+cdef extern from "<vector>" namespace "std":
+    cdef cppclass vector[T]:
+        cppclass iterator:
+            T operator*()
+            iterator operator++()
+            bint operator==(iterator)
+            bint operator!=(iterator)
+        vector()
+        void push_back(T&)
+        T& operator[](int)
+        T& at(int)
+        iterator begin()
+        iterator end()
+
+cdef extern from "<map>" namespace "std":
+    cdef cppclass map[A,B]:
+        pass
+
+cdef extern from "string" namespace "std":
+    cdef cppclass string:
+        string(char *cstr)
+        char *c_str()
+        string operator*()
+
+cdef extern from "RAMSES_typedefs.h":
+    pass
+
+cdef extern from "RAMSES_info.hh" namespace "RAMSES":
+    enum codeversion:
+        version1
+        version2
+        version3
+
+    cdef cppclass snapshot:
+        string m_filename
+        codeversion m_version
+        cppclass info_data:
+            unsigned ncpu
+            unsigned ndim
+            unsigned levelmin
+            unsigned levelmax
+            unsigned ngridmax
+            unsigned nstep_coarse
+            double boxlen
+            double time
+            double aexp
+            double H0
+            double omega_m
+            double omega_l
+            double omega_k
+            double omega_b
+            double unit_l
+            double unit_d
+            double unit_t
+
+        info_data m_header
+        vector[double] ind_min
+        vector[double] ind_max
+
+        snapshot(string info_filename, codeversion ver)
+
+        unsigned get_snapshot_num()
+        unsigned getdomain_bykey( double key )
+
+    #void hilbert3d(vector[double] &x, vector[double] &y, vector[double] &z,
+    #               vector[double] &order, unsigned bit_length)
+
+cdef extern from "RAMSES_amr_data.hh" namespace "RAMSES::AMR":
+    cdef cppclass vec[real_t]:
+        real_t x, y, z
+        vec( real_t x_, real_t y_, real_t z_)
+        vec ( vec& v)
+        vec ( )
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass cell_locally_essential[id_t, real_t]:
+        id_t m_neighbor[6]
+        id_t m_father
+        id_t m_son[8]
+        real_t m_xg[3]
+        id_t m_cpu
+
+        char m_pos
+
+        cell_locally_essential()
+
+        bint is_refined(int ison)
+
+    cdef cppclass RAMSES_cell:
+        unsigned m_neighbour[6]
+        unsigned m_father
+        unsigned m_son[8]
+        float m_xg[3]
+        unsigned m_cpu
+
+        char m_pos
+
+        RAMSES_cell()
+
+        bint is_refined(int ison)
+
+    cdef cppclass cell_simple[id_t, real_t]:
+        id_t m_son[1]
+        real_t m_xg[3]
+        id_t m_cpu
+
+        char m_pos
+        cell_simple()
+        bint is_refined(int ison)
+
+    # AMR level implementation
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass level[Cell_]:
+        unsigned m_ilevel
+        vector[Cell_] m_level_cells
+        double m_xc[8]
+        double m_yc[8]
+        double m_zc[8]
+
+        # I skipped the typedefs here
+
+        double m_dx
+        unsigned m_nx
+        level (unsigned ilevel)
+
+        void register_cell( Cell_ cell )
+        vector[Cell_].iterator begin()
+        vector[Cell_].iterator end()
+
+        Cell_& operator[]( unsigned i)
+        unsigned size()
+
+    cdef cppclass RAMSES_level:
+        unsigned m_ilevel
+        vector[RAMSES_cell] m_level_cells
+        double m_xc[8]
+        double m_yc[8]
+        double m_zc[8]
+
+        # I skipped the typedefs here
+
+        double m_dx
+        unsigned m_nx
+        RAMSES_level (unsigned ilevel)
+
+        void register_cell( RAMSES_cell cell )
+        vector[RAMSES_cell].iterator begin()
+        vector[RAMSES_cell].iterator end()
+
+        RAMSES_cell& operator[]( unsigned i)
+        unsigned size()
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass tree[Cell_, Level_]:
+        cppclass header:
+            vector[int] nx
+            vector[int] nout
+            vector[int] nsteps
+            int ncpu
+            int ndim
+            int nlevelmax
+            int ngridmax
+            int nboundary
+            int ngrid_current
+            double boxlen
+            vector[double] tout
+            vector[double] aout
+            vector[double] dtold
+            vector[double] dtnew
+            vector[double] cosm
+            vector[double] timing
+            double t
+            double mass_sph
+
+        vector[Level_] m_AMR_levels
+        vector[unsigned] mheadl, m_numbl, m_taill
+
+        int m_cpu
+        int m_minlevel
+        int m_maxlevel
+        string m_fname
+        unsigned m_ncoarse
+        header m_header
+
+        # This is from later on in the .hh file ... I don't think we need them
+        # typedef proto_iterator<const tree*> const_iterator;
+        # typedef proto_iterator<tree *> iterator;
+
+        tree (snapshot &snap, int cpu, int maxlevel, int minlevel = 1)
+
+    cppclass tree_iterator "RAMSES::AMR::RAMSES_tree::iterator":
+        tree_iterator operator*()
+        RAMSES_cell operator*()
+        tree_iterator begin()
+        tree_iterator end()
+        tree_iterator to_parent()
+        tree_iterator get_parent()
+        void next()
+        bint operator!=(tree_iterator other)
+        unsigned get_cell_father()
+        bint is_finest(int ison)
+        int get_absolute_position()
+
+    cdef cppclass RAMSES_tree:
+        # This is, strictly speaking, not a header.  But, I believe it is
+        # going to work alright.
+        cppclass header:
+            vector[int] nx
+            vector[int] nout
+            vector[int] nsteps
+            int ncpu
+            int ndim
+            int nlevelmax
+            int ngridmax
+            int nboundary
+            int ngrid_current
+            double boxlen
+            vector[double] tout
+            vector[double] aout
+            vector[double] dtold
+            vector[double] dtnew
+            vector[double] cosm
+            vector[double] timing
+            double t
+            double mass_sph
+
+        vector[RAMSES_level] m_AMR_levels
+        vector[unsigned] mheadl, m_numbl, m_taill
+
+        int m_cpu
+        int m_minlevel
+        int m_maxlevel
+        string m_fname
+        unsigned m_ncoarse
+        header m_header
+
+        unsigned size()
+
+        # This is from later on in the .hh file ... I don't think we need them
+        # typedef proto_iterator<const tree*> const_iterator;
+        # typedef proto_iterator<tree *> iterator;
+
+        RAMSES_tree(snapshot &snap, int cpu, int maxlevel, int minlevel)
+        void read()
+
+        tree_iterator begin(int ilevel)
+        tree_iterator end(int ilevel)
+
+        tree_iterator begin()
+        tree_iterator end()
+
+        vec[double] cell_pos_double "cell_pos<double>" (tree_iterator it, unsigned ind) 
+        vec[double] grid_pos_double "grid_pos<double>" (tree_iterator it)
+        vec[float] cell_pos_float "cell_pos<float>" (tree_iterator it, unsigned ind) 
+        vec[float] grid_pos_float "grid_pos<float>" (tree_iterator it)
+
+cdef extern from "RAMSES_amr_data.hh" namespace "RAMSES::HYDRO":
+    enum hydro_var:
+        density
+        velocity_x
+        velocity_y
+        velocity_z
+        pressure
+        metallicit
+
+    char ramses_hydro_variables[][64]
+
+    # There are a number of classes here that we could wrap and utilize.
+    # However, I will only wrap the methods I know we need.
+
+    # I have no idea if this will work.
+    cdef cppclass TreeTypeIterator[TreeType_]:
+        pass
+
+    # This class definition is out of date.  I have unrolled the template
+    # below.
+    cdef cppclass data[TreeType_, Real_]:
+        cppclass header:
+            unsigned ncpu
+            unsigned nvar
+            unsigned ndim
+            unsigned nlevelmax
+            unsigned nboundary
+            double gamma
+        string m_fname
+        header m_header
+
+        # I don't want to implement proto_data, so we put this here
+        Real_& cell_value(TreeTypeIterator[TreeType_] &it, int ind)
+
+        unsigned m_nvars
+        vector[string] m_varnames
+        map[string, unsigned] m_var_name_map
+
+        data(TreeType_ &AMRtree)
+        #_OutputIterator get_var_names[_OutputIterator](_OutputIterator names)
+        void read(string varname)
+
+    cdef cppclass RAMSES_hydro_data:
+        cppclass header:
+            unsigned ncpu
+            unsigned nvar
+            unsigned ndim
+            unsigned nlevelmax
+            unsigned nboundary
+            double gamma
+        string m_fname
+        header m_header
+
+        # I don't want to implement proto_data, so we put this here
+        double cell_value (tree_iterator &it, int ind)
+
+        unsigned m_nvars
+        vector[string] m_varnames
+        map[string, unsigned] m_var_name_map
+
+        RAMSES_hydro_data(RAMSES_tree &AMRtree)
+        #_OutputIterator get_var_names[_OutputIterator](_OutputIterator names)
+        void read(string varname)
+
+        vector[vector[double]] m_var_array
+
+cdef class RAMSES_tree_proxy:
+    cdef string *snapshot_name
+    cdef snapshot *rsnap
+
+    cdef RAMSES_tree** trees
+    cdef RAMSES_hydro_data*** hydro_datas
+
+    cdef int *loaded
+
+    cdef public object field_ind
+    cdef public object field_names
+
+    # We will store this here so that we have a record, independent of the
+    # header, of how many things we have allocated
+    cdef int ndomains, nfields
+    
+    def __cinit__(self, char *fn):
+        cdef int idomain, ifield, ii
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        self.snapshot_name = new string(fn)
+        self.rsnap = new snapshot(deref(self.snapshot_name), version3)
+        # We now have to get our field names to fill our array
+        self.trees = <RAMSES_tree**>\
+            malloc(sizeof(RAMSES_tree*) * self.rsnap.m_header.ncpu)
+        self.hydro_datas = <RAMSES_hydro_data ***>\
+                       malloc(sizeof(RAMSES_hydro_data**) * self.rsnap.m_header.ncpu)
+        self.ndomains = self.rsnap.m_header.ncpu
+        #for ii in range(self.ndomains): self.trees[ii] = NULL
+        for idomain in range(1, self.rsnap.m_header.ncpu + 1):
+            local_tree = new RAMSES_tree(deref(self.rsnap), idomain,
+                                         self.rsnap.m_header.levelmax, 0)
+            local_tree.read()
+            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            self.nfields = local_hydro_data.m_nvars
+            self.hydro_datas[idomain - 1] = <RAMSES_hydro_data **>\
+                malloc(sizeof(RAMSES_hydro_data*) * self.nfields)
+            for ii in range(self.nfields):
+                self.hydro_datas[idomain - 1][ii] = \
+                    new RAMSES_hydro_data(deref(local_tree))
+            self.trees[idomain - 1] = local_tree
+            # The temporary was only needed for m_nvars; the tree and the
+            # per-field instances above are kept until __dealloc__.
+            del local_hydro_data
+        # Read the field names once, from one of the live instances.
+        cdef string *field_name
+        self.field_names = []
+        self.field_ind = {}
+        # One "loaded" flag per (domain, field) pair.
+        self.loaded = <int *> malloc(sizeof(int) * self.ndomains * self.nfields)
+        for ii in range(self.ndomains * self.nfields):
+            self.loaded[ii] = 0
+        for ifield in range(self.nfields):
+            field_name = &(self.hydro_datas[0][0].m_varnames[ifield])
+            # c_str() is coerced to a (copied) Python string, so this does not leak
+            self.field_names.append(field_name.c_str())
+            self.field_ind[self.field_names[-1]] = ifield
+
+    def __dealloc__(self):
+        cdef int idomain, ifield
+        for idomain in range(self.ndomains):
+            for ifield in range(self.nfields):
+                if self.hydro_datas[idomain][ifield] != NULL:
+                    del self.hydro_datas[idomain][ifield]
+            if self.trees[idomain] != NULL:
+                del self.trees[idomain]
+            free(self.hydro_datas[idomain])
+        free(self.trees)
+        free(self.hydro_datas)
+        free(self.loaded)
+        if self.snapshot_name != NULL: del self.snapshot_name
+        if self.rsnap != NULL: del self.rsnap
+        
+    def count_zones(self):
+        # We need to do simulation domains here
+
+        cdef unsigned idomain, ilevel
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        cdef RAMSES_level *local_level
+
+        # All the loop-local pointers must be declared up here
+
+        cell_count = []
+        for ilevel in range(self.rsnap.m_header.levelmax + 1):
+            cell_count.append(0)
+        for idomain in range(1, self.rsnap.m_header.ncpu + 1):
+            local_tree = new RAMSES_tree(deref(self.rsnap), idomain,
+                                         self.rsnap.m_header.levelmax, 0)
+            local_tree.read()
+            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            for ilevel in range(local_tree.m_maxlevel + 1):
+                local_level = &local_tree.m_AMR_levels[ilevel]
+                cell_count[ilevel] += local_level.size()
+            del local_tree, local_hydro_data
+
+        return cell_count
+
+    def ensure_loaded(self, char *varname, int domain_index):
+        # this domain_index must be zero-indexed
+        cdef int varindex = self.field_ind[varname]
+        # Loaded flags are tracked per (domain, field) pair; returning early
+        # here also avoids allocating a temporary string only to leak it.
+        if self.loaded[domain_index * self.nfields + varindex] == 1: return
+        cdef string *field_name = new string(varname)
+        print "READING FROM DISK", varname, "domain", domain_index + 1
+        self.hydro_datas[domain_index][varindex].read(deref(field_name))
+        self.loaded[domain_index * self.nfields + varindex] = 1
+        del field_name
+
+    def clear_tree(self, char *varname, int domain_index):
+        # this domain_index must be zero-indexed
+        # We delete and re-create, dropping the in-memory values for this
+        # (domain, field) pair.
+        cdef int varindex = self.field_ind[varname]
+        if self.loaded[domain_index * self.nfields + varindex] == 0: return
+        del self.hydro_datas[domain_index][varindex]
+        self.hydro_datas[domain_index][varindex] = \
+            new RAMSES_hydro_data(deref(self.trees[domain_index]))
+        self.loaded[domain_index * self.nfields + varindex] = 0
+
+    def get_file_info(self):
+        header_info = {}
+        header_info["ncpu"] = self.rsnap.m_header.ncpu
+        header_info["ndim"] = self.rsnap.m_header.ndim
+        header_info["levelmin"] = self.rsnap.m_header.levelmin
+        header_info["levelmax"] = self.rsnap.m_header.levelmax
+        header_info["ngridmax"] = self.rsnap.m_header.ngridmax
+        header_info["nstep_coarse"] = self.rsnap.m_header.nstep_coarse
+        header_info["boxlen"] = self.rsnap.m_header.boxlen
+        header_info["time"] = self.rsnap.m_header.time
+        header_info["aexp"] = self.rsnap.m_header.aexp
+        header_info["H0"] = self.rsnap.m_header.H0
+        header_info["omega_m"] = self.rsnap.m_header.omega_m
+        header_info["omega_l"] = self.rsnap.m_header.omega_l
+        header_info["omega_k"] = self.rsnap.m_header.omega_k
+        header_info["omega_b"] = self.rsnap.m_header.omega_b
+        header_info["unit_l"] = self.rsnap.m_header.unit_l
+        header_info["unit_d"] = self.rsnap.m_header.unit_d
+        header_info["unit_t"] = self.rsnap.m_header.unit_t
+        return header_info
+
+    def fill_hierarchy_arrays(self, 
+                              np.ndarray[np.float64_t, ndim=2] left_edges,
+                              np.ndarray[np.float64_t, ndim=2] right_edges,
+                              np.ndarray[np.int32_t, ndim=2] grid_levels,
+                              np.ndarray[np.int64_t, ndim=2] grid_file_locations,
+                              np.ndarray[np.int32_t, ndim=2] child_mask):
+        # We need to do simulation domains here
+
+        cdef unsigned idomain, ilevel
+
+        # All the loop-local pointers must be declared up here
+        cdef RAMSES_tree *local_tree
+        cdef RAMSES_hydro_data *local_hydro_data
+        cdef unsigned father
+
+        cdef tree_iterator grid_it, grid_end, father_it
+        cdef vec[double] gvec
+        cdef int grid_ind = 0
+        cdef unsigned parent_ind
+        cdef int ci
+
+        cdef double grid_half_width 
+
+        cdef np.int32_t rr
+        cell_count = []
+        level_cell_counts = {}
+        for idomain in range(1, self.rsnap.m_header.ncpu + 1):
+            local_tree = new RAMSES_tree(deref(self.rsnap), idomain,
+                                         self.rsnap.m_header.levelmax, 0)
+            local_tree.read()
+            local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
+            for ilevel in range(local_tree.m_maxlevel + 1):
+                # this gets overwritten for every domain, which is okay
+                level_cell_counts[ilevel] = grid_ind 
+                grid_half_width = self.rsnap.m_header.boxlen / (2**(ilevel + 1))
+                grid_it = local_tree.begin(ilevel)
+                grid_end = local_tree.end(ilevel)
+                while grid_it != grid_end:
+                    gvec = local_tree.grid_pos_double(grid_it)
+                    left_edges[grid_ind, 0] = gvec.x - grid_half_width
+                    left_edges[grid_ind, 1] = gvec.y - grid_half_width
+                    left_edges[grid_ind, 2] = gvec.z - grid_half_width
+                    right_edges[grid_ind, 0] = gvec.x + grid_half_width
+                    right_edges[grid_ind, 1] = gvec.y + grid_half_width
+                    right_edges[grid_ind, 2] = gvec.z + grid_half_width
+                    grid_levels[grid_ind, 0] = ilevel
+                    # Now the harder part
+                    father_it = grid_it.get_parent()
+                    grid_file_locations[grid_ind, 0] = <np.int64_t> idomain
+                    grid_file_locations[grid_ind, 1] = grid_ind - level_cell_counts[ilevel]
+                    parent_ind = father_it.get_absolute_position()
+                    if ilevel > 0:
+                        # We calculate the REAL parent index
+                        grid_file_locations[grid_ind, 2] = \
+                            level_cell_counts[ilevel - 1] + parent_ind
+                    else:
+                        grid_file_locations[grid_ind, 2] = -1
+                    for ci in range(8):
+                        rr = <np.int32_t> grid_it.is_finest(ci)
+                        child_mask[grid_ind, ci] = rr
+                    grid_ind += 1
+                    grid_it.next()
+            del local_tree, local_hydro_data
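+
+    # Illustrative sketch (comments only) of the parent bookkeeping above:
+    # level_cell_counts[l] records the global grid index at which the current
+    # domain's level-l grids begin, so the parent's global index is that
+    # offset plus the parent's position within its own level.  For instance,
+    # if this domain's level-1 grids start at global index 9 and the parent
+    # sits at position 4 within level 1, then
+    #
+    #   grid_file_locations[grid_ind, 2] == 9 + 4 == 13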
+
+    def read_oct_grid(self, char *field, int level, int domain, int grid_id):
+
+        self.ensure_loaded(field, domain - 1)
+        cdef int varindex = self.field_ind[field]
+        cdef int i
+
+        cdef np.ndarray[np.float64_t, ndim=3] tr = np.empty((2,2,2), dtype='float64',
+                                                   order='F')
+        cdef tree_iterator grid_it, grid_end
+        cdef double* data = <double*> tr.data
+
+        cdef RAMSES_tree *local_tree = self.trees[domain - 1]
+        cdef RAMSES_hydro_data *local_hydro_data = self.hydro_datas[domain - 1][varindex]
+
+        #inline ValueType_& cell_value( const typename TreeType_::iterator& it,
+        #                               int ind )
+        #{
+        #   unsigned ipos   = it.get_absolute_position();
+        #   unsigned ilevel = it.get_level();//-m_minlevel;
+        #   return (m_var_array[ilevel])[m_twotondim*ipos+ind];
+        #}
+        
+        for i in range(8): 
+            data[i] = local_hydro_data.m_var_array[level][8*grid_id+i]
+        return tr
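+
+    # Illustrative sketch (comments only): both read_oct_grid above and
+    # read_grid below assume the flat index of a cell within an oct is
+    # (k*2 + j)*2 + i, i.e. i varies fastest, which matches the Fortran-order
+    # (2, 2, 2) array returned here.  A hypothetical pure-Python check:
+    #
+    #   def oct_index(i, j, k):
+    #       return (k * 2 + j) * 2 + i
+    #
+    #   assert [oct_index(i, j, k)
+    #           for k in range(2) for j in range(2) for i in range(2)] \
+    #          == list(range(8))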
+
+    def read_grid(self, char *field, 
+                  np.ndarray[np.int64_t, ndim=1] start_index,
+                  np.ndarray[np.int32_t, ndim=1] grid_dims,
+                  np.ndarray[np.float64_t, ndim=3] data,
+                  np.ndarray[np.int32_t, ndim=3] filled,
+                  int level, int ref_factor,
+                  component_grid_info):
+        cdef int varindex = self.field_ind[field]
+        cdef RAMSES_tree *local_tree = NULL
+        cdef RAMSES_hydro_data *local_hydro_data = NULL
+
+        cdef int gi, i, j, k, domain, offset, odind
+        cdef int ir, jr, kr
+        cdef int offi, offj, offk
+        cdef np.int64_t di, dj, dk
+        cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
+        cdef np.ndarray[np.int64_t, ndim=1] og_start_index
+        cdef np.float64_t temp_data
+        cdef np.int64_t end_index[3]
+        cdef int to_fill = 0
+        # Note that indexing into a cell is:
+        #   (k*2 + j)*2 + i
+        for i in range(3):
+            end_index[i] = start_index[i] + grid_dims[i]
+        for gi in range(len(component_grid_info)):
+            ogrid_info = component_grid_info[gi]
+            domain = ogrid_info[0]
+            self.ensure_loaded(field, domain - 1)
+            local_tree = self.trees[domain - 1]
+            local_hydro_data = self.hydro_datas[domain - 1][varindex]
+            offset = ogrid_info[1]
+            og_start_index = ogrid_info[3:]
+            for i in range(2*ref_factor):
+                di = i + og_start_index[0] * ref_factor
+                if di < start_index[0] or di >= end_index[0]: continue
+                ir = <int> (i / ref_factor)
+                for j in range(2 * ref_factor):
+                    dj = j + og_start_index[1] * ref_factor
+                    if dj < start_index[1] or dj >= end_index[1]: continue
+                    jr = <int> (j / ref_factor)
+                    for k in range(2 * ref_factor):
+                        dk = k + og_start_index[2] * ref_factor
+                        if dk < start_index[2] or dk >= end_index[2]: continue
+                        kr = <int> (k / ref_factor)
+                        offi = di - start_index[0]
+                        offj = dj - start_index[1]
+                        offk = dk - start_index[2]
+                        #print offi, filled.shape[0],
+                        #print offj, filled.shape[1],
+                        #print offk, filled.shape[2]
+                        if filled[offi, offj, offk] == 1: continue
+
+                        odind = (kr*2 + jr)*2 + ir
+                        temp_data = local_hydro_data.m_var_array[
+                                level][8*offset + odind]
+                        data[offi, offj, offk] = temp_data
+                        filled[offi, offj, offk] = 1
+                        to_fill += 1
+        return to_fill
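+
+    # Illustrative sketch (comments only) of the index mapping above, for a
+    # hypothetical oct with og_start_index = (3, 0, 0), ref_factor = 2 and a
+    # covering grid with start_index = (4, 0, 0):
+    #
+    #   along x, i runs over 0 .. 2*ref_factor - 1 = 0 .. 3, so the global
+    #   fine indices are di = i + 3 * 2 = 6 .. 9; any di outside
+    #   [start_index[0], end_index[0]) is skipped, the rest land at
+    #   offi = di - 4, and each takes the value of oct cell ir = i // 2.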
+
+cdef class ProtoSubgrid:
+    cdef np.int64_t *signature[3]
+    cdef np.int64_t left_edge[3]
+    cdef np.int64_t right_edge[3]
+    cdef np.int64_t dimensions[3]
+    cdef public np.float64_t efficiency
+    cdef public object sigs
+    cdef public object grid_file_locations
+    cdef public object dd
+        
+    #@cython.boundscheck(False)
+    #@cython.wraparound(False)
+    def __cinit__(self,
+                   np.ndarray[np.int64_t, ndim=1] left_index,
+                   np.ndarray[np.int64_t, ndim=1] dimensions, 
+                   np.ndarray[np.int64_t, ndim=2] left_edges,
+                   np.ndarray[np.int64_t, ndim=2] right_edges,
+                   np.ndarray[np.int64_t, ndim=2] grid_dimensions,
+                   np.ndarray[np.int64_t, ndim=2] grid_file_locations):
+        # This also includes the shrinking step.
+        cdef int i, ci, ng = left_edges.shape[0]
+        cdef np.ndarray temp_arr
+        cdef int l0, r0, l1, r1, l2, r2, i0, i1, i2
+        cdef np.int64_t temp_l[3], temp_r[3], ncells
+        cdef np.float64_t efficiency
+        self.sigs = []
+        for i in range(3):
+            temp_l[i] = left_index[i] + dimensions[i]
+            temp_r[i] = left_index[i]
+            self.signature[i] = NULL
+        for gi in range(ng):
+            if left_edges[gi,0] > left_index[0]+dimensions[0] or \
+               right_edges[gi,0] < left_index[0] or \
+               left_edges[gi,1] > left_index[1]+dimensions[1] or \
+               right_edges[gi,1] < left_index[1] or \
+               left_edges[gi,2] > left_index[2]+dimensions[2] or \
+               right_edges[gi,2] < left_index[2]:
+               #print "Skipping grid", gi, "which lies outside out box"
+               continue
+            for i in range(3):
+                temp_l[i] = i64min(left_edges[gi,i], temp_l[i])
+                temp_r[i] = i64max(right_edges[gi,i], temp_r[i])
+        for i in range(3):
+            self.left_edge[i] = i64max(temp_l[i], left_index[i])
+            self.right_edge[i] = i64min(temp_r[i], left_index[i] + dimensions[i])
+            self.dimensions[i] = self.right_edge[i] - self.left_edge[i]
+            if self.dimensions[i] <= 0:
+                self.efficiency = -1.0
+                return
+            self.sigs.append(np.zeros(self.dimensions[i], 'int64'))
+        #print self.sigs[0].size, self.sigs[1].size, self.sigs[2].size
+        
+        # My guess is that this whole loop could be done more efficiently.
+        # However, this is clear and straightforward, so it is a good first
+        # pass.
+        cdef np.ndarray[np.int64_t, ndim=1] sig0, sig1, sig2
+        sig0 = self.sigs[0]
+        sig1 = self.sigs[1]
+        sig2 = self.sigs[2]
+        efficiency = 0.0
+        cdef int used
+        self.grid_file_locations = []
+        for gi in range(ng):
+            used = 0
+            nnn = 0
+            for l0 in range(grid_dimensions[gi, 0]):
+                i0 = left_edges[gi, 0] + l0
+                if i0 < self.left_edge[0]: continue
+                if i0 >= self.right_edge[0]: break
+                for l1 in range(grid_dimensions[gi, 1]):
+                    i1 = left_edges[gi, 1] + l1
+                    if i1 < self.left_edge[1]: continue
+                    if i1 >= self.right_edge[1]: break
+                    for l2 in range(grid_dimensions[gi, 2]):
+                        i2 = left_edges[gi, 2] + l2
+                        if i2 < self.left_edge[2]: continue
+                        if i2 >= self.right_edge[2]: break
+                        i = i0 - self.left_edge[0]
+                        sig0[i] += 1
+                        i = i1 - self.left_edge[1]
+                        sig1[i] += 1
+                        i = i2 - self.left_edge[2]
+                        sig2[i] += 1
+                        efficiency += 1
+                        used = 1
+            if used == 1:
+                grid_file_locations[gi,3] = left_edges[gi, 0]
+                grid_file_locations[gi,4] = left_edges[gi, 1]
+                grid_file_locations[gi,5] = left_edges[gi, 2]
+                self.grid_file_locations.append(grid_file_locations[gi,:])
+         
+        self.dd = np.ones(3, dtype='int64')
+        for i in range(3):
+            efficiency /= self.dimensions[i]
+            self.dd[i] = self.dimensions[i]
+        #print "Efficiency is %0.3e" % (efficiency)
+        self.efficiency = efficiency
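+
+    # Illustrative sketch (comments only): the per-axis signatures built above
+    # are counts of covered cells summed over the other two axes, and the
+    # efficiency is covered cells / total cells in the box.  A hypothetical
+    # NumPy equivalent, for an integer coverage mask:
+    #
+    #   import numpy as np
+    #   mask = np.zeros((4, 4, 4), dtype='int64')
+    #   mask[1:3, 1:3, 1:3] = 1             # a 2x2x2 block of covered cells
+    #   sig0 = mask.sum(axis=(1, 2))        # -> [0, 4, 4, 0]
+    #   sig1 = mask.sum(axis=(0, 2))
+    #   sig2 = mask.sum(axis=(0, 1))
+    #   efficiency = mask.sum() / float(mask.size)   # -> 0.125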
+
+    def find_split(self):
+        # First look for zeros
+        cdef int i, center, ax
+        cdef np.ndarray[ndim=1, dtype=np.int64_t] axes
+        cdef np.int64_t strength, zcstrength, zcp
+        axes = np.argsort(self.dd)[::-1]
+        cdef np.ndarray[np.int64_t] sig
+        for axi in range(3):
+            ax = axes[axi]
+            center = self.dimensions[ax] / 2
+            sig = self.sigs[ax]
+            for i in range(self.dimensions[ax]):
+                if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
+                    #print "zero: %s (%s)" % (i, self.dimensions[ax])
+                    return 'zs', ax, i
+        zcstrength = 0
+        zcp = 0
+        zca = -1
+        cdef int temp
+        cdef np.int64_t *sig2d
+        for axi in range(3):
+            ax = axes[axi]
+            center = self.dimensions[ax] / 2
+            sig = self.sigs[ax]
+            sig2d = <np.int64_t *> malloc(sizeof(np.int64_t) * self.dimensions[ax])
+            sig2d[0] = sig2d[self.dimensions[ax]-1] = 0
+            for i in range(1, self.dimensions[ax] - 1):
+                sig2d[i] = sig[i-1] - 2*sig[i] + sig[i+1]
+            for i in range(1, self.dimensions[ax] - 1):
+                if sig2d[i] * sig2d[i+1] <= 0:
+                    strength = labs(sig2d[i] - sig2d[i+1])
+                    if (strength > zcstrength) or \
+                       (strength == zcstrength and (abs(center - i) <
+                                                    abs(center - zcp))):
+                        zcstrength = strength
+                        zcp = i
+                        zca = ax
+            free(sig2d)
+        #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
+        return 'zc', ax, zcp
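+
+    # Illustrative sketch (comments only) of the criterion above on a
+    # hypothetical signature: any interior zero wins outright ('zs');
+    # otherwise we take the strongest sign change of the discrete second
+    # derivative, breaking ties toward the centre.
+    #
+    #   sig   = [2, 4, 8, 8, 4, 2]
+    #   sig2d = [0, 2, -4, -4, 2, 0]     # sig[i-1] - 2*sig[i] + sig[i+1]
+    #   sign changes between i = 1,2 and i = 3,4, both with strength 6;
+    #   i = 3 is closer to the centre (3), so the split is 'zc' at i = 3.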
+
+    def get_properties(self):
+        cdef np.ndarray[np.int64_t, ndim=2] tr = np.empty((3,3), dtype='int64')
+        cdef int i
+        for i in range(3):
+            tr[0,i] = self.left_edge[i]
+            tr[1,i] = self.right_edge[i]
+            tr[2,i] = self.dimensions[i]
+        return tr

Modified: trunk/yt/raven/Callbacks.py
==============================================================================
--- trunk/yt/raven/Callbacks.py	(original)
+++ trunk/yt/raven/Callbacks.py	Tue Aug 10 16:02:10 2010
@@ -263,6 +263,81 @@
                     plot._axes.text(left_edge_px[n]+2,left_edge_py[n]+2,ids[n])
             plot._axes.hold(False)
 
+class StreamlineCallback(PlotCallback):
+    _type_name = "streamlines"
+    def __init__(self, field_x, field_y, factor=6.0, nx=16, ny=16,
+                 xstart=(0,1), ystart=(0,1), nsample=256,
+                 start_at_xedge=False, start_at_yedge=False,
+                 plot_args=None):
+        """
+        Add streamlines to any plot, using the *field_x* and *field_y*
+        from the associated data, using *nx* and *ny* starting points
+        that are bounded by *xstart* and *ystart*.  To begin
+        streamlines from the left edge of the plot, set
+        *start_at_xedge* to True; for the bottom edge, use
+        *start_at_yedge*.  A line with the qmean vector magnitude will
+        cover 1.0/*factor* of the image.
+        """
+        PlotCallback.__init__(self)
+        self.field_x = field_x
+        self.field_y = field_y
+        self.xstart = xstart
+        self.ystart = ystart
+        self.nsample = nsample
+        self.factor = factor
+        if start_at_xedge:
+            self.data_size = (1,ny)
+        elif start_at_yedge:
+            self.data_size = (nx,1)
+        else:
+            self.data_size = (nx,ny)
+        if plot_args is None: plot_args = {'color':'k', 'linestyle':'-'}
+        self.plot_args = plot_args
+        
+    def __call__(self, plot):
+        x0, x1 = plot.xlim
+        y0, y1 = plot.ylim
+        xx0, xx1 = plot._axes.get_xlim()
+        yy0, yy1 = plot._axes.get_ylim()
+        plot._axes.hold(True)
+        nx = plot.image._A.shape[0]
+        ny = plot.image._A.shape[1]
+        pixX = _MPL.Pixelize(plot.data['px'],
+                             plot.data['py'],
+                             plot.data['pdx'],
+                             plot.data['pdy'],
+                             plot.data[self.field_x],
+                             int(nx), int(ny),
+                           (x0, x1, y0, y1),)
+        pixY = _MPL.Pixelize(plot.data['px'],
+                             plot.data['py'],
+                             plot.data['pdx'],
+                             plot.data['pdy'],
+                             plot.data[self.field_y],
+                             int(nx), int(ny),
+                           (x0, x1, y0, y1),)
+        r0 = na.mgrid[self.xstart[0]*nx:self.xstart[1]*nx:self.data_size[0]*1j,
+                      self.ystart[0]*ny:self.ystart[1]*ny:self.data_size[1]*1j]
+        lines = na.zeros((self.nsample, 2, self.data_size[0], self.data_size[1]))
+        lines[0,:,:,:] = r0
+        mag = na.sqrt(pixX**2 + pixY**2)
+        scale = na.sqrt(nx*ny) / (self.factor * mag.mean())
+        dt = 1.0 / (self.nsample-1)
+        for i in range(1,self.nsample):
+            xt = lines[i-1,0,:,:]
+            yt = lines[i-1,1,:,:]
+            ix = na.maximum(na.minimum((xt).astype('int'), nx-1), 0)
+            iy = na.maximum(na.minimum((yt).astype('int'), ny-1), 0)
+            lines[i,0,:,:] = xt + dt * pixX[ix,iy] * scale
+            lines[i,1,:,:] = yt + dt * pixY[ix,iy] * scale
+        for i in range(self.data_size[0]):
+            for j in range(self.data_size[1]):
+                plot._axes.plot(lines[:,0,i,j], lines[:,1,i,j],
+                                **self.plot_args)
+        plot._axes.set_xlim(xx0,xx1)
+        plot._axes.set_ylim(yy0,yy1)
+        plot._axes.hold(False)
+
 class LabelCallback(PlotCallback):
     _type_name = "axis_label"
     def __init__(self, label):

Modified: trunk/yt/setup.py
==============================================================================
--- trunk/yt/setup.py	(original)
+++ trunk/yt/setup.py	Tue Aug 10 16:02:10 2010
@@ -2,6 +2,9 @@
 import setuptools
 import os, sys
 
+def check_for_ramses():
+    return os.environ.get("RAMSES", None)
+
 def check_for_png():
     # First up: HDF5_DIR in environment
     if "PNG_DIR" in os.environ:
@@ -73,6 +76,9 @@
         include_dirs=["yt/_amr_utils/", png_inc],
         library_dirs=[png_lib],
         libraries=["m", "png"])
+    config.add_extension("ramses_reader",
+        ["yt/ramses_reader.cpp"],
+        include_dirs=["yt/ramses_headers/"])
     config.make_config_py()
     config.make_svn_version_py()
     return config


