[Yt-svn] yt-commit r1811 - in trunk/yt: . _amr_utils extensions lagos

mturk at wrangler.dreamhost.com
Tue Aug 24 18:57:00 PDT 2010


Author: mturk
Date: Tue Aug 24 18:56:59 2010
New Revision: 1811
URL: http://yt.enzotools.org/changeset/1811

Log:
Big backport from hg.  The new functionality is mainly the EnzoSimulation changes
from Britton and the vector math modules from Stephen.  Also included are some
fixes to FLASH from John ZuHone and some fixes to Gadget from Chris Moody, plus
new "--detailed" and "--paste-detailed" options from me, which use the cgitb
module for much more detailed output of tracebacks.



Added:
   trunk/yt/lagos/two_point_functions.py
      - copied unchanged from r1806, /trunk/yt/lagos/TwoPointFunctions.py
Removed:
   trunk/yt/lagos/TwoPointFunctions.py
Modified:
   trunk/yt/_amr_utils/DepthFirstOctree.pyx
   trunk/yt/extensions/enzo_simulation.py
   trunk/yt/extensions/merger_tree.py
   trunk/yt/funcs.py
   trunk/yt/lagos/BaseGridType.py
   trunk/yt/lagos/EnzoFields.py
   trunk/yt/lagos/FLASHFields.py
   trunk/yt/lagos/HaloFinding.py
   trunk/yt/lagos/HierarchyType.py
   trunk/yt/lagos/OutputTypes.py
   trunk/yt/lagos/UniversalFields.py
   trunk/yt/lagos/__init__.py
   trunk/yt/math_utils.py
   trunk/yt/ramses_reader.pyx

Modified: trunk/yt/_amr_utils/DepthFirstOctree.pyx
==============================================================================
--- trunk/yt/_amr_utils/DepthFirstOctree.pyx	(original)
+++ trunk/yt/_amr_utils/DepthFirstOctree.pyx	Tue Aug 24 18:56:59 2010
@@ -77,15 +77,15 @@
     cdef np.float64_t child_dx
     cdef np.ndarray[np.float64_t, ndim=1] child_leftedges
     cdef np.float64_t cx, cy, cz
-    for k_off in range(k_f):
-        k = k_off + k_i
-        cz = (leftedges[2] + k*dx)
+    for i_off in range(i_f):
+        i = i_off + i_i
+        cx = (leftedges[0] + i*dx)
         for j_off in range(j_f):
             j = j_off + j_i
             cy = (leftedges[1] + j*dx)
-            for i_off in range(i_f):
-                i = i_off + i_i
-                cx = (leftedges[0] + i*dx)
+            for k_off in range(k_f):
+                k = k_off + k_i
+                cz = (leftedges[2] + k*dx)
                 ci = grid.child_indices[i,j,k]
                 if ci == -1:
                     for fi in range(fields.shape[0]):
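The hunk above swaps the loop nest so that i (x) is the slowest-varying index
and k (z) the fastest, i.e. cells are now visited in C (row-major) order for
the [i,j,k] indexing of grid.child_indices.  A standalone illustration of the
traversal change (plain Python, illustrative only):

    # old order: k outermost, i innermost
    old = [(i, j, k) for k in range(2) for j in range(2) for i in range(2)]
    # new order: i outermost, k innermost (row-major for [i,j,k] arrays)
    new = [(i, j, k) for i in range(2) for j in range(2) for k in range(2)]
    print old[:3]   # [(0, 0, 0), (1, 0, 0), (0, 1, 0)]
    print new[:3]   # [(0, 0, 0), (0, 0, 1), (0, 1, 0)]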

Modified: trunk/yt/extensions/enzo_simulation.py
==============================================================================
--- trunk/yt/extensions/enzo_simulation.py	(original)
+++ trunk/yt/extensions/enzo_simulation.py	Tue Aug 24 18:56:59 2010
@@ -2,6 +2,7 @@
 import yt.lagos as lagos
 import yt.funcs as funcs
 import numpy as na
+import glob
 import os
 
 dt_Tolerance = 1e-3
@@ -12,7 +13,8 @@
     a simulation from one redshift to another.
     """
     def __init__(self, EnzoParameterFile, initial_time=None, final_time=None, initial_redshift=None, final_redshift=None,
-                 links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False):
+                 links=False, enzo_parameters=None, get_time_outputs=True, get_redshift_outputs=True, get_available_data=False,
+                 get_data_by_force=False):
         """
         Initialize an EnzoSimulation object.
         :param initial_time (float): the initial time in code units for the dataset list.  Default: None.
@@ -31,12 +33,15 @@
                be added to the dataset list.  Default: True.
         :param get_redshift_outputs (bool): if False, the redshift datasets will not be added to the dataset list.  Default: True.
         :param get_available_data (bool): if True, only datasets that are found to exist at the file path are added 
-               to the list.  Devault: False.
+               to the list.  Default: False.
+        :param get_data_by_force (bool): if True, time data dumps are not calculated using dtDataDump.  Instead, the 
+               working directory is searched for directories that match the DataDumpDir keyword.  Each dataset 
+               is loaded up to get the time and redshift manually.  This is useful with collapse simulations that use 
+               OutputFirstTimeAtLevel or with simulations that make outputs based on cycle numbers.  Default: False.
         """
         self.EnzoParameterFile = EnzoParameterFile
         self.enzoParameters = {}
-        self.redshiftOutputs = []
-        self.timeOutputs = []
+        self.redshift_outputs = []
         self.allOutputs = []
         self.InitialTime = initial_time
         self.FinalTime = final_time
@@ -52,76 +57,26 @@
         EnzoParameterDict.update(enzo_parameters)
 
         # Set some parameter defaults.
-        self._SetParameterDefaults()
+        self._set_parameter_defaults()
 
         # Read parameters.
-        self._ReadEnzoParameterFile()
+        self._read_enzo_parameter_file()
 
-        # Check for sufficient starting/ending parameters.
-        if self.InitialTime is None and self.InitialRedshift is None:
-            if self.enzoParameters['ComovingCoordinates'] and \
-               'CosmologyInitialRedshift' in self.enzoParameters:
-                self.InitialRedshift = self.enzoParameters['CosmologyInitialRedshift']
-            elif 'InitialTime' in self.enzoParameters:
-                self.InitialTime = self.enzoParameters['InitialTime']
-            else:
-                mylog.error("Couldn't find parameter for initial time or redshift from parameter file.")
-                return None
-
-        if self.FinalTime is None and self.FinalRedshift is None:
-            if self.enzoParameters['ComovingCoordinates'] and \
-               'CosmologyFinalRedshift' in self.enzoParameters:
-                self.FinalRedshift = self.enzoParameters['CosmologyFinalRedshift']
-            elif 'StopTime' in self.enzoParameters:
-                self.FinalTime = self.enzoParameters['StopTime']
-            else:
-                mylog.error("Couldn't find parameter for final time or redshift from parameter file.")
-                return None
-
-        # Convert initial/final redshifts to times.
-        if self.enzoParameters['ComovingCoordinates']:
-            # Instantiate a cosmology calculator.
-            self.cosmology = lagos.Cosmology(HubbleConstantNow = 
-                                             (100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
-                                             OmegaMatterNow = self.enzoParameters['CosmologyOmegaMatterNow'],
-                                             OmegaLambdaNow = self.enzoParameters['CosmologyOmegaLambdaNow'])
+        # Get all the appropriate datasets.
+        self._get_all_outputs(brute_force=get_data_by_force)
 
-            # Instantiate EnzoCosmology object for units and time conversions.
-            self.enzo_cosmology = lagos.EnzoCosmology(HubbleConstantNow = 
-                                                 (100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
-                                                 OmegaMatterNow = self.enzoParameters['CosmologyOmegaMatterNow'],
-                                                 OmegaLambdaNow = self.enzoParameters['CosmologyOmegaLambdaNow'],
-                                                 InitialRedshift = self.enzoParameters['CosmologyInitialRedshift'])
-            if self.InitialRedshift is not None:
-                self.InitialTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.InitialRedshift) / \
-                    self.enzo_cosmology.TimeUnits
-            if self.FinalRedshift is not None:
-                self.FinalTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.FinalRedshift) / \
-                    self.enzo_cosmology.TimeUnits
-
-        # Get initial time of simulation.
-        if self.enzoParameters['ComovingCoordinates'] and \
-                'CosmologyInitialRedshift' in self.enzoParameters:
-            self.SimulationInitialTime = self.enzo_cosmology.InitialTime / self.enzo_cosmology.TimeUnits
-        elif 'InitialTime' in self.enzoParameters:
-            self.SimulationInitialTime = self.enzoParameters['InitialTime']
-        else:
-            self.SimulationInitialTime = 0.0
-
-        # Combine all data dumps.
-        self._CombineDataOutputs()
-
-    def _CalculateRedshiftDumpTimes(self):
+    def _calculate_redshift_dump_times(self):
         "Calculates time from redshift of redshift dumps."
 
-        for output in self.redshiftOutputs:
+        for output in self.redshift_outputs:
             output['time'] = self.enzo_cosmology.ComputeTimeFromRedshift(output['redshift']) / \
                 self.enzo_cosmology.TimeUnits
 
-    def _CalculateTimeDumps(self):
+    def _calculate_time_dumps(self):
         "Calculates time dumps and their redshifts if cosmological."
 
-        if self.enzoParameters['dtDataDump'] <= 0.0: return
+        if self.enzoParameters['dtDataDump'] <= 0.0: return []
+        time_outputs = []
 
         index = 0
         current_time = self.SimulationInitialTime
@@ -137,38 +92,39 @@
                 output['redshift'] = self.enzo_cosmology.ComputeRedshiftFromTime(t)
 
             if not self.get_available_data or os.path.exists(filename):
-                self.timeOutputs.append(output)
+                time_outputs.append(output)
 
             current_time += self.enzoParameters['dtDataDump']
             index += 1
 
-    def _CombineDataOutputs(self):
+        return time_outputs
+
+    def _combine_data_outputs(self, brute_force=False):
         "Combines redshift and time data into one sorted list."
 
-        # Calculate redshifts for dt data dumps.
-        if self.enzoParameters.has_key('dtDataDump') and self.get_time_outputs:
-            self._CalculateTimeDumps()
+        # If True, get time dumps just by looking through the working directory.
+        if brute_force:
+            time_outputs = self._get_outputs_by_force()
+
+        # Calculate time dumps based on dtDataDump
+        elif self.enzoParameters.has_key('dtDataDump') and self.get_time_outputs:
+            time_outputs = self._calculate_time_dumps()
 
         # Calculate times for redshift dumps.
         if self.enzoParameters['ComovingCoordinates'] and self.get_redshift_outputs:
-            self._CalculateRedshiftDumpTimes()
+            self._calculate_redshift_dump_times()
         else:
-            self.redshiftOutputs = []
-
-        if self.get_time_outputs and self.get_redshift_outputs:
-            self.allOutputs = self.redshiftOutputs + self.timeOutputs
-        elif self.get_time_outputs and not self.get_redshift_outputs:
-            self.allOutputs = self.timeOutputs
-        elif not self.get_time_outputs and self.get_redshift_outputs:
-            self.allOutputs = self.redshiftOutputs
+            self.redshift_outputs = []
 
+        self.allOutputs = self.redshift_outputs + time_outputs
         self.allOutputs.sort(key=lambda obj:obj['time'])
 
         start_index = None
         end_index = None
 
+        # Add links to next and previous dataset to each entry.
         for q in range(len(self.allOutputs)):
-            del self.allOutputs[q]['index']
+            if self.allOutputs[q].has_key('index'): del self.allOutputs[q]['index']
 
             if start_index is None:
                 if self.allOutputs[q]['time'] >= self.InitialTime or \
@@ -197,15 +153,104 @@
                 else:
                     self.allOutputs[q]['next'] = self.allOutputs[q+1]
 
-        del self.redshiftOutputs
-        del self.timeOutputs
+        del self.redshift_outputs
 
         if end_index is None:
             end_index = len(self.allOutputs)-1
 
         self.allOutputs = self.allOutputs[start_index:end_index+1]
+        mylog.info("Loaded %s total data outputs." % len(self.allOutputs))
+
+    def _get_all_outputs(self, brute_force=False):
+        """
+        Get all the datasets in the time/redshift interval requested, or search 
+        the data directory for potential datasets.
+        """
+
+        # Check for sufficient starting/ending parameters.
+        if self.InitialTime is None and self.InitialRedshift is None:
+            if self.enzoParameters['ComovingCoordinates'] and \
+               'CosmologyInitialRedshift' in self.enzoParameters:
+                self.InitialRedshift = self.enzoParameters['CosmologyInitialRedshift']
+            elif 'InitialTime' in self.enzoParameters:
+                self.InitialTime = self.enzoParameters['InitialTime']
+            else:
+                mylog.error("Couldn't find parameter for initial time or redshift from parameter file.")
+                return None
+
+        if self.FinalTime is None and self.FinalRedshift is None:
+            if self.enzoParameters['ComovingCoordinates'] and \
+               'CosmologyFinalRedshift' in self.enzoParameters:
+                self.FinalRedshift = self.enzoParameters['CosmologyFinalRedshift']
+            elif 'StopTime' in self.enzoParameters:
+                self.FinalTime = self.enzoParameters['StopTime']
+            else:
+                mylog.error("Couldn't find parameter for final time or redshift from parameter file.")
+                return None
+
+        # Convert initial/final redshifts to times.
+        if self.enzoParameters['ComovingCoordinates']:
+            # Instantiate a cosmology calculator.
+            self.cosmology = lagos.Cosmology(HubbleConstantNow = 
+                                             (100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
+                                             OmegaMatterNow = self.enzoParameters['CosmologyOmegaMatterNow'],
+                                             OmegaLambdaNow = self.enzoParameters['CosmologyOmegaLambdaNow'])
+
+            # Instantiate EnzoCosmology object for units and time conversions.
+            self.enzo_cosmology = lagos.EnzoCosmology(HubbleConstantNow = 
+                                                 (100.0 * self.enzoParameters['CosmologyHubbleConstantNow']),
+                                                 OmegaMatterNow = self.enzoParameters['CosmologyOmegaMatterNow'],
+                                                 OmegaLambdaNow = self.enzoParameters['CosmologyOmegaLambdaNow'],
+                                                 InitialRedshift = self.enzoParameters['CosmologyInitialRedshift'])
+            if self.InitialRedshift is not None:
+                self.InitialTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.InitialRedshift) / \
+                    self.enzo_cosmology.TimeUnits
+            if self.FinalRedshift is not None:
+                self.FinalTime = self.enzo_cosmology.ComputeTimeFromRedshift(self.FinalRedshift) / \
+                    self.enzo_cosmology.TimeUnits
+
+        # Get initial time of simulation.
+        if self.enzoParameters['ComovingCoordinates'] and \
+                'CosmologyInitialRedshift' in self.enzoParameters:
+            self.SimulationInitialTime = self.enzo_cosmology.InitialTime / self.enzo_cosmology.TimeUnits
+        elif 'InitialTime' in self.enzoParameters:
+            self.SimulationInitialTime = self.enzoParameters['InitialTime']
+        else:
+            self.SimulationInitialTime = 0.0
+
+        # Combine all data dumps into an ordered list.
+        self._combine_data_outputs(brute_force=brute_force)
+
+    def _get_outputs_by_force(self):
+        """
+        Search for directories matching the data dump keywords.
+        If found, get dataset times by brute force by opening the pf.
+        """
+
+        # look for time dumps.
+        potential_time_outputs = glob.glob("%s/%s*" % (self.enzoParameters['GlobalDir'], 
+                                                       self.enzoParameters['DataDumpDir']))
+        time_outputs = []
+        mylog.info("Checking validity of %d potential time dumps." % 
+                   len(potential_time_outputs))
+
+        for output in potential_time_outputs:
+            index = output[output.find(self.enzoParameters['DataDumpDir']) + 
+                           len(self.enzoParameters['DataDumpDir']):]
+            filename = "%s/%s%s/%s%s" % (self.enzoParameters['GlobalDir'],
+                                         self.enzoParameters['DataDumpDir'], index,
+                                         self.enzoParameters['DataDumpName'], index)
+            if os.path.exists(filename):
+                pf = lagos.EnzoStaticOutput(filename)
+                if pf is not None:
+                    time_outputs.append({'filename': filename, 'time': pf['InitialTime']})
+                    if self.enzoParameters['ComovingCoordinates']:
+                        time_outputs[-1]['redshift'] = pf['CosmologyCurrentRedshift']
+                del pf
+        mylog.info("Located %d time dumps." % len(time_outputs))
+        return time_outputs
 
-    def _ReadEnzoParameterFile(self):
+    def _read_enzo_parameter_file(self):
         "Reads an Enzo parameter file looking for cosmology and output parameters."
         lines = open(self.EnzoParameterFile).readlines()
         for line in lines:
@@ -228,23 +273,23 @@
                     self.enzoParameters[param] = t
             elif param.startswith("CosmologyOutputRedshift["):
                 index = param[param.find("[")+1:param.find("]")]
-                self.redshiftOutputs.append({'index':int(index), 'redshift':float(vals)})
+                self.redshift_outputs.append({'index':int(index), 'redshift':float(vals)})
 
         # Add filenames to redshift outputs.
         tempRedshiftList = []
-        for output in self.redshiftOutputs:
+        for output in self.redshift_outputs:
             output["filename"] = "%s/%s%04d/%s%04d" % (self.enzoParameters['GlobalDir'],
                                                        self.enzoParameters['RedshiftDumpDir'],output['index'],
                                                        self.enzoParameters['RedshiftDumpName'],output['index'])
             if not self.get_available_data or os.path.exists(output["filename"]):
                 tempRedshiftList.append(output)
-        self.redshiftOutputs = tempRedshiftList
+        self.redshift_outputs = tempRedshiftList
         del tempRedshiftList
 
-    def _SetParameterDefaults(self):
+    def _set_parameter_defaults(self):
         "Set some default parameters to avoid problems if they are not in the parameter file."
         self.enzoParameters['GlobalDir'] = "."
-        self.enzoParameters['RedshiftDumpName'] = "RD"
+        self.enzoParameters['RedshiftDumpName'] = "RedshiftOutput"
         self.enzoParameters['RedshiftDumpDir'] = "RD"
         self.enzoParameters['DataDumpName'] = "data"
         self.enzoParameters['DataDumpDir'] = "DD"
@@ -273,7 +318,7 @@
                 rounded += na.power(10.0,(-1.0*decimals))
             z = rounded
 
-            deltaz_max = deltaz_forward(self.cosmology, z, self.enzoParameters['CosmologyComovingBoxSize'])
+            deltaz_max = __deltaz_forward(self.cosmology, z, self.enzoParameters['CosmologyComovingBoxSize'])
             outputs.append({'redshift': z, 'deltazMax': deltaz_max})
             z -= deltaz_max
 
@@ -497,7 +542,7 @@
                      "DataDumpDir": str,
                      "GlobalDir" : str}
 
-def deltaz_forward(cosmology, z, target_distance):
+def __deltaz_forward(cosmology, z, target_distance):
     "Calculate deltaz corresponding to moving a comoving distance starting from some redshift."
 
     d_Tolerance = 1e-4
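A minimal usage sketch of the new brute-force discovery mode (import path
inferred from the file location; parameter file name hypothetical):

    from yt.extensions.enzo_simulation import EnzoSimulation

    # Scan GlobalDir for DataDumpDir-style directories and open each pf,
    # rather than computing dump times from dtDataDump:
    es = EnzoSimulation("simulation.par", get_data_by_force=True)
    for output in es.allOutputs:
        print output['filename'], output['time']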

Modified: trunk/yt/extensions/merger_tree.py
==============================================================================
--- trunk/yt/extensions/merger_tree.py	(original)
+++ trunk/yt/extensions/merger_tree.py	Tue Aug 24 18:56:59 2010
@@ -819,6 +819,34 @@
                 results = self.cursor.fetchone()
         return parents
 
+    def get_direct_parent(self, GlobalHaloID):
+        r"""Returns the GlobalHaloID of the direct parent of the given halo.
+        
+        This is accomplished by identifying the most massive parent halo
+        that contributes at least 50% of its mass to the given halo.
+        
+        Parameters
+        ----------
+        GlobalHaloID : Integer
+            The GlobalHaloID of the halo of interest.
+        
+        Examples
+        --------
+        >>> parent = mtc.get_direct_parent(1688)
+        >>> print parent
+        1544
+        """
+        parents = self.get_halo_parents(GlobalHaloID)
+        mass = 0
+        ID = None
+        for parent in parents:
+            if parent[1] < 0.5: continue
+            info = self.get_halo_info(parent[0])
+            if info['HaloMass'] > mass:
+                mass = info['HaloMass']
+                ID = parent[0]
+        return ID
+
     def get_halo_info(self, GlobalHaloID):
         r"""Returns all available information for the given GlobalHaloID
         in the form of a dict.

Modified: trunk/yt/funcs.py
==============================================================================
--- trunk/yt/funcs.py	(original)
+++ trunk/yt/funcs.py	Tue Aug 24 18:56:59 2010
@@ -394,18 +394,43 @@
     print "Traceback pasted to http://paste.enzotools.org/show/%s" % (ret)
     print
 
+def paste_traceback_detailed(exc_type, exc, tb):
+    """
+    This is a traceback handler that knows how to paste to the pastebin.
+    Should only be used in sys.excepthook.
+    """
+    import xmlrpclib, cStringIO, cgitb
+    s = cStringIO.StringIO()
+    handler = cgitb.Hook(format="text", file = s)
+    handler(exc_type, exc, tb)
+    s = s.getvalue()
+    print s
+    p = xmlrpclib.ServerProxy(
+            "http://paste.enzotools.org/xmlrpc/",
+            allow_none=True)
+    ret = p.pastes.newPaste('text', s, None, '', '', True)
+    print
+    print "Traceback pasted to http://paste.enzotools.org/show/%s" % (ret)
+    print
+
 # If we recognize one of the arguments on the command line as indicating a
 # different mechanism for handling tracebacks, we attach one of those handlers
 # and remove the argument from sys.argv.
 if "--paste" in sys.argv:
     sys.excepthook = paste_traceback
     del sys.argv[sys.argv.index("--paste")]
+elif "--paste-detailed" in sys.argv:
+    sys.excepthook = paste_traceback_detailed
+    del sys.argv[sys.argv.index("--paste-detailed")]
 elif "--detailed" in sys.argv:
     import cgitb; cgitb.enable(format="text")
     del sys.argv[sys.argv.index("--detailed")]
 elif "--rpdb" in sys.argv:
     sys.excepthook = rpdb.rpdb_excepthook
     del sys.argv[sys.argv.index("--rpdb")]
+elif "--detailed" in sys.argv:
+    import cgitb; cgitb.enable(format="text")
+    del sys.argv[sys.argv.index("--detailed")]
 
 #
 # Some exceptions
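For reference, a standalone sketch of the cgitb-based hook in isolation
(illustrative only, minus the pastebin step; cgitb.Hook instances are callable
with the usual excepthook signature):

    import sys, cgitb, cStringIO

    def detailed_excepthook(exc_type, exc, tb):
        s = cStringIO.StringIO()
        # Render the full cgitb text report into the buffer, then print it.
        cgitb.Hook(format="text", file=s)(exc_type, exc, tb)
        print s.getvalue()

    sys.excepthook = detailed_excepthook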

Modified: trunk/yt/lagos/BaseGridType.py
==============================================================================
--- trunk/yt/lagos/BaseGridType.py	(original)
+++ trunk/yt/lagos/BaseGridType.py	Tue Aug 24 18:56:59 2010
@@ -27,6 +27,8 @@
 #import yt.enki, gc
 from yt.funcs import *
 
+import pdb
+
 class AMRGridPatch(object):
     _spatial = True
     _num_ghost_zones = 0
@@ -362,7 +364,7 @@
         mask[startIndex[0]:endIndex[0],
              startIndex[1]:endIndex[1],
              startIndex[2]:endIndex[2]] = tofill
-
+        
     def __generate_child_mask(self):
         """
         Generates self.child_mask, which is zero where child grids exist (and
@@ -598,8 +600,9 @@
         self.N = 0
         self.Address = ''
         self.NumberOfParticles = self.N
-        self.ActiveDimensions = [0,0,0]
+        self.ActiveDimensions = na.array([0,0,0])
         self._id_offset = 0
+        self.start_index = na.array([0,0,0])
         
         for key,val in kwargs.items():
             if key in dir(self):
@@ -609,6 +612,9 @@
     #def __repr__(self):
     #    return "GadgetGrid_%04i" % (self.Address)
     
+    def get_global_startindex(self):
+        return self.start_index
+        
     def _prepare_grid(self):
         #all of this info is already included in the snapshots
         pass
@@ -621,12 +627,13 @@
         # So first we figure out what the index is.  We don't assume
         # that dx=dy=dz , at least here.  We probably do elsewhere.
         id = self.id
-        LE, RE = self.hierarchy.grid_left_edge[id,:], \
-                     self.hierarchy.grid_right_edge[id,:]
+        LE, RE = self.LeftEdge,self.RightEdge
         self.dds = na.array((RE-LE)/self.ActiveDimensions)
         if self.pf["TopGridRank"] < 2: self.dds[1] = 1.0
         if self.pf["TopGridRank"] < 3: self.dds[2] = 1.0
         self.data['dx'], self.data['dy'], self.data['dz'] = self.dds
+    
+        
 
 
 class ChomboGrid(AMRGridPatch):

Modified: trunk/yt/lagos/EnzoFields.py
==============================================================================
--- trunk/yt/lagos/EnzoFields.py	(original)
+++ trunk/yt/lagos/EnzoFields.py	Tue Aug 24 18:56:59 2010
@@ -114,6 +114,12 @@
 add_field("ThermalEnergy", function=_ThermalEnergy,
           units=r"\rm{ergs}/\rm{cm^3}")
 
+def _KineticEnergy(field, data):
+    return 0.5*data["Density"] * ( data["x-velocity"]**2.0
+                                   + data["y-velocity"]**2.0
+                                   + data["z-velocity"]**2.0 )
+add_field("KineticEnergy",function=_KineticEnergy,
+          units = r"\rm{ergs}/\rm{cm^3}")
 # This next section is the energy field section
 # Note that we have aliases that manually unconvert themselves.
 # This is because numerous code branches use Gas_Energy or GasEnergy
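The new _KineticEnergy function above operates on dict-like data, so it can be
sanity-checked standalone (plain dict standing in for a yt data object; values
hypothetical):

    import numpy as na

    data = {"Density":    na.array([1.0, 2.0]),
            "x-velocity": na.array([3.0, 0.0]),
            "y-velocity": na.array([4.0, 0.0]),
            "z-velocity": na.array([0.0, 5.0])}
    # 0.5 * rho * |v|^2 per cell:
    print _KineticEnergy(None, data)   # -> [ 12.5  25. ]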

Modified: trunk/yt/lagos/FLASHFields.py
==============================================================================
--- trunk/yt/lagos/FLASHFields.py	(original)
+++ trunk/yt/lagos/FLASHFields.py	Tue Aug 24 18:56:59 2010
@@ -76,15 +76,15 @@
           validators = [ValidateDataField("game")],
           units = r"\rm{ratio\/of\/specific\/heats}")
 
-add_field("gpot", function=lambda a,b: None, take_log=True,
+add_field("gpot", function=lambda a,b: None, take_log=False,
           validators = [ValidateDataField("gpot")],
           units = r"\rm{ergs\//\/g}")
 
-add_field("gpot", function=lambda a,b: None, take_log=True,
+add_field("gpol", function=lambda a,b: None, take_log=False,
           validators = [ValidateDataField("gpol")],
           units = r"\rm{ergs\//\/g}")
 
-add_field("grac", function=lambda a,b: None, take_log=True,
+add_field("grac", function=lambda a,b: None, take_log=False,
           validators = [ValidateDataField("grac")],
           units = r"\rm{cm\/s^{-2}}")
 
@@ -95,3 +95,23 @@
 add_field("pres", function=lambda a,b: None, take_log=True,
           validators = [ValidateDataField("pres")],
           units = r"\rm{erg}\//\/\rm{cm}^{3}")
+
+add_field("magx", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("magx")],
+          units = r"\rm{G}")
+
+add_field("magy", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("magy")],
+          units = r"\rm{G}")
+
+add_field("magz", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("magz")],
+          units = r"\rm{G}")
+
+add_field("magp", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("magp")],
+          units = r"\rm{erg}\//\/\rm{cm}^{3}")
+
+add_field("divb", function=lambda a,b: None, take_log=False,
+          validators = [ValidateDataField("divb")],
+          units = r"\rm{G}\/\rm{cm}")

Modified: trunk/yt/lagos/HaloFinding.py
==============================================================================
--- trunk/yt/lagos/HaloFinding.py	(original)
+++ trunk/yt/lagos/HaloFinding.py	Tue Aug 24 18:56:59 2010
@@ -255,6 +255,9 @@
                    + ["particle_velocity_%s" % ax for ax in 'xyz'] \
                    + ["particle_index"] + ["ParticleMassMsun"]:
             handle.create_dataset("/%s/%s" % (gn, field), data=self[field])
+        if 'creation_time' in self.data.pf.h.field_list:
+            handle.create_dataset("/%s/creation_time" % gn,
+                data=self['creation_time'])
         n = handle["/%s" % gn]
         # set attributes on n
         self._processing = False

Modified: trunk/yt/lagos/HierarchyType.py
==============================================================================
--- trunk/yt/lagos/HierarchyType.py	(original)
+++ trunk/yt/lagos/HierarchyType.py	Tue Aug 24 18:56:59 2010
@@ -1219,7 +1219,7 @@
         #example string:
         #"(S'VEL'\np1\nS'ID'\np2\nS'MASS'\np3\ntp4\n."
         #fields are surrounded with '
-        fields_string=self._handle['root'].attrs['names']
+        fields_string=self._handle['root'].attrs['fieldnames']
         #splits=fields_string.split("'")
         #pick out the odd fields
         #fields= [splits[j] for j in range(1,len(splits),2)]
@@ -1263,8 +1263,8 @@
         
         kwargs = {}
         kwargs['Address'] = loc
-        kwargs['Children'] = [ch for ch in node.values()]
         kwargs['Parent'] = parent
+        kwargs['Axis']  = self.pf._get_param('divideaxis',location=loc)
         kwargs['Level']  = self.pf._get_param('level',location=loc)
         kwargs['LeftEdge'] = self.pf._get_param('leftedge',location=loc) 
         kwargs['RightEdge'] = self.pf._get_param('rightedge',location=loc)
@@ -1274,22 +1274,31 @@
         dx = self.pf._get_param('dx',location=loc)
         dy = self.pf._get_param('dy',location=loc)
         dz = self.pf._get_param('dz',location=loc)
-        kwargs['ActiveDimensions'] = (dx,dy,dz)
-        grid = self.grid(idx,self.pf.parameter_filename,self,**kwargs)
+        divdims = na.array([1,1,1])
+        if not kwargs['IsLeaf']: 
+            divdims[kwargs['Axis']] = 2
+        kwargs['ActiveDimensions'] = divdims
+        #Active dimensions:
+        #This is the number of child nodes, along with dimensionality
+        #i.e., a binary tree can be (2,1,1) but an octree is (2,2,2)
+        
         idx+=1
-        grids += [grid,]
         #pdb.set_trace()
-        if kwargs['IsLeaf']:
-            return grids,idx
-        else:
+        children = []
+        if not kwargs['IsLeaf']:
             for child in node.values():
-                grids,idx=self._walk_nodes(node,child,grids,idx=idx)
+                children,idx=self._walk_nodes(node,child,children,idx=idx)
+        
+        kwargs['Children'] = children
+        grid = self.grid(idx,self.pf.parameter_filename,self,**kwargs)
+        grids += children
+        grids += [grid,]
         return grids,idx
-    
+
     def _populate_grid_objects(self):
         for g in self.grids:
             g._prepare_grid()
-        self.max_level = self._handle['root'].attrs['maxlevel']
+        self.max_level = cPickle.loads(self._handle['root'].attrs['maxlevel'])
     
     def _setup_unknown_fields(self):
         pass
@@ -1662,39 +1671,100 @@
         ogrid_file_locations = na.zeros((num_ogrids,6), dtype='int64')
         ochild_masks = na.zeros((num_ogrids, 8), dtype='int32')
         self.tree_proxy.fill_hierarchy_arrays(
+            self.pf["TopGridDimensions"],
             ogrid_left_edge, ogrid_right_edge,
             ogrid_levels, ogrid_file_locations, ochild_masks)
+        # Now we can rescale
+        mi, ma = ogrid_left_edge.min(), ogrid_right_edge.max()
+        DL = self.pf["DomainLeftEdge"]
+        DR = self.pf["DomainRightEdge"]
+        ogrid_left_edge = (ogrid_left_edge - mi)/(ma - mi) * (DR - DL) + DL
+        ogrid_right_edge = (ogrid_right_edge - mi)/(ma - mi) * (DR - DL) + DL
+        #import pdb;pdb.set_trace()
         # We now have enough information to run the patch coalescing 
         self.proto_grids = []
         for level in xrange(len(level_info)):
             if level_info[level] == 0: continue
             ggi = (ogrid_levels == level).ravel()
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * (2.0**(level+1))).astype('int64')
-            right_index = left_index + 2
+            mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
+            nd = self.pf["TopGridDimensions"] * 2**level
             dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
             fl = ogrid_file_locations[ggi,:]
             # Now our initial protosubgrid
-            initial_left = na.zeros(3, dtype='int64')
-            idims = na.ones(3, dtype='int64') * (2**(level+1))
             #if level == 6: raise RuntimeError
-            psg = ramses_reader.ProtoSubgrid(initial_left, idims,
-                            left_index, right_index, dims, fl)
-            self.proto_grids.append(self._recursive_patch_splitting(
-                    psg, idims, initial_left, 
-                    left_index, right_index, dims, fl))
+            # We want grids that cover no more than MAX_EDGE cells in every direction
+            MAX_EDGE = 128
+            psgs = []
+            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd).astype('int64')
+            right_index = left_index + 2
+            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
+            #lefts = zip(*[l.ravel() for l in lefts])
+            pbar = get_pbar("Re-gridding ", lefts[0].size)
+            min_ind = na.min(left_index, axis=0)
+            max_ind = na.max(right_index, axis=0)
+            for i,dli in enumerate(lefts[0]):
+                pbar.update(i)
+                if min_ind[0] > dli + nd[0]: continue
+                if max_ind[0] < dli: continue
+                idim = min(nd[0] - dli, MAX_EDGE)
+                gdi = ((dli  <= right_index[:,0])
+                     & (dli + idim >= left_index[:,0]))
+                if not na.any(gdi): continue
+                for dlj in lefts[1]:
+                    if min_ind[1] > dlj + nd[1]: continue
+                    if max_ind[1] < dlj: continue
+                    idim = min(nd[1] - dlj, MAX_EDGE)
+                    gdj = ((dlj  <= right_index[:,1])
+                         & (dlj + idim >= left_index[:,1])
+                         & (gdi))
+                    if not na.any(gdj): continue
+                    for dlk in lefts[2]:
+                        if min_ind[2] > dlk + nd[2]: continue
+                        if max_ind[2] < dlk: continue
+                        idim = min(nd[2] - dlk, MAX_EDGE)
+                        gdk = ((dlk  <= right_index[:,2])
+                             & (dlk + idim >= left_index[:,2])
+                             & (gdj))
+                        if not na.any(gdk): continue
+                        left = na.array([dli, dlj, dlk])
+                        domain_left = left.ravel()
+                        initial_left = na.zeros(3, dtype='int64') + domain_left
+                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
+                        # We want to find how many grids are inside.
+                        dleft_index = left_index[gdk,:]
+                        dright_index = right_index[gdk,:]
+                        ddims = dims[gdk,:]
+                        dfl = fl[gdk,:]
+                        psg = ramses_reader.ProtoSubgrid(initial_left, idims,
+                                        dleft_index, dright_index, ddims, dfl)
+                        #print "Gridding from %s to %s + %s" % (
+                        #    initial_left, initial_left, idims)
+                        if psg.efficiency <= 0: continue
+                        self.num_deep = 0
+                        psgs.extend(self._recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dright_index, ddims, dfl))
+                        #psgs.extend([psg])
+            pbar.finish()
+            self.proto_grids.append(psgs)
             sums = na.zeros(3, dtype='int64')
+            mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
             for g in self.proto_grids[level]:
                 sums += [s.sum() for s in g.sigs]
             assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
-    #num_deep = 0
+    num_deep = 0
 
-    #@num_deep_inc
+    @num_deep_inc
     def _recursive_patch_splitting(self, psg, dims, ind,
             left_index, right_index, gdims, fl):
-        min_eff = 0.2 # This isn't always respected.
+        min_eff = 0.1 # This isn't always respected.
+        if self.num_deep > 40:
+            # If we've recursed more than 40 times, we give up.
+            psg.efficiency = min_eff
+            return [psg]
         if psg.efficiency > min_eff or psg.efficiency < 0.0:
             return [psg]
         tt, ax, fp = psg.find_split()
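The re-gridding loop above tiles each level's index space into blocks of at
most MAX_EDGE cells per axis before recursive patch splitting; the block left
edges come straight from na.mgrid.  A standalone illustration (level
dimensions hypothetical):

    import numpy as na

    nd = na.array([512, 512, 512])   # example level dimensions
    MAX_EDGE = 128
    lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
    print lefts[0]   # [  0 128 256 384] -> block left edges along x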

Modified: trunk/yt/lagos/OutputTypes.py
==============================================================================
--- trunk/yt/lagos/OutputTypes.py	(original)
+++ trunk/yt/lagos/OutputTypes.py	Tue Aug 24 18:56:59 2010
@@ -439,6 +439,9 @@
         for i in self.parameters:
             if isinstance(self.parameters[i], types.TupleType):
                 self.parameters[i] = na.array(self.parameters[i])
+            if i.endswith("Units") and not i.startswith("Temperature"):
+                dataType = i[:-5]
+                self.conversion_factors[dataType] = self.parameters[i]
         for i in self.conversion_factors:
             if isinstance(self.conversion_factors[i], types.TupleType):
                 self.conversion_factors[i] = na.array(self.conversion_factors[i])
@@ -637,6 +640,8 @@
         # generalization.
         self.parameters["TopGridRank"] = 3
         self.parameters["RefineBy"] = 2
+        self.parameters["DomainLeftEdge"] = self.leftedge
+        self.parameters["DomainRightEdge"] = self.rightedge
         
         
     def _parse_parameter_file(self):
@@ -654,7 +659,7 @@
                 continue
             val = fh['root'].attrs[kw]
             if type(val)==type(''):
-                try:    val = cPickle.load(val)
+                try:    val = cPickle.loads(val)
                 except: pass
             #also, includes unit info
             setattr(self,kw,val)
@@ -662,9 +667,8 @@
     def _get_param(self,kw,location='/root'):
         fh = h5py.File(self.parameter_filename)
         val = fh[location].attrs[kw]
-        if type(val)==type(''):
-            try:    val = cPickle.load(val)
-            except: pass
+        try:    val = cPickle.loads(val)
+        except: pass
         return val
             
     def _set_units(self):
@@ -676,6 +680,7 @@
         self.time_units['1'] = 1
         self.units['1'] = 1.0
         self.units['unitary'] = 1.0
+        self.units['cm'] = 1.0
         seconds = 1 #self["Time"]
         self.time_units['years'] = seconds / (365*3600*24.0)
         self.time_units['days']  = seconds / (3600*24.0)
@@ -846,8 +851,6 @@
         self.storage_filename = storage_filename
 
         self.field_info = self._fieldinfo_class()
-        # hardcoded for now
-        self.parameters["InitialTime"] = 0.0
         # These should be explicitly obtained from the file, but for now that
         # will wait until a reorganization of the source tree and better
         # generalization.
@@ -855,6 +858,7 @@
         self.parameters["RefineBy"] = 2
         self.parameters["HydroMethod"] = 'flash' # always PPM DE
         self.parameters["Time"] = 1. # default unit is 1...
+        self._set_units()
         
     def _set_units(self):
         """
@@ -904,6 +908,8 @@
             [self._find_parameter("real", "%smin" % ax) for ax in 'xyz'])
         self.parameters["DomainRightEdge"] = na.array(
             [self._find_parameter("real", "%smax" % ax) for ax in 'xyz'])
+        self.parameters["InitialTime"] = \
+            float(self._find_parameter("real", "time", scalar=True))
         self._handle.close()
 
     @classmethod
@@ -978,7 +984,7 @@
         self.parameters["DomainRightEdge"] = na.ones(3, dtype='float64') \
                                            * rheader['boxlen']
         self.parameters["DomainLeftEdge"] = na.zeros(3, dtype='float64')
-        self.parameters["TopGridDimensions"] = na.zeros(3, dtype='int64') + 2
+        self.parameters["TopGridDimensions"] = na.ones(3, dtype='int32') * 2
 
     @classmethod
     def _is_valid(self, *args, **kwargs):

Modified: trunk/yt/lagos/UniversalFields.py
==============================================================================
--- trunk/yt/lagos/UniversalFields.py	(original)
+++ trunk/yt/lagos/UniversalFields.py	Tue Aug 24 18:56:59 2010
@@ -796,3 +796,12 @@
           validators=[ValidateSpatial(0)], convert_function=_convertDensity,
           display_name=r"\mathrm{Particle}\/\mathrm{Density})")
 
+def _MagneticEnergy(field,data):
+    """WARNING WARNING WARNING: Units are not yet known to be
+    correct. Trust the magnitude of this quantity at your own
+    risk. However, it should just be a multiplicative offset from
+    reality...
+    """
+    return (data["Bx"]**2 + data["By"]**2 + data["Bz"]**2)/2.
+add_field("MagneticEnergy",function=_MagneticEnergy,
+          units=r"")
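One note on the units warning: in Gaussian/CGS units the magnetic energy
density is B^2/(8*pi), while the field above returns B^2/2; the two differ by
exactly 4*pi, presumably the "multiplicative offset" the docstring
anticipates.  A sketch of the CGS form (assuming B in Gauss):

    import numpy as na

    def magnetic_energy_cgs(Bx, By, Bz):
        # ergs/cm^3 when Bx, By, Bz are in Gauss
        return (Bx**2 + By**2 + Bz**2) / (8 * na.pi)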

Modified: trunk/yt/lagos/__init__.py
==============================================================================
--- trunk/yt/lagos/__init__.py	(original)
+++ trunk/yt/lagos/__init__.py	Tue Aug 24 18:56:59 2010
@@ -102,7 +102,7 @@
 
 from HaloFinding import *
 
-from TwoPointFunctions import *
+from two_point_functions import *
 
 # We load plugins.  Keep in mind, this can be fairly dangerous -
 # the primary purpose is to allow people to have a set of functions

Modified: trunk/yt/math_utils.py
==============================================================================
--- trunk/yt/math_utils.py	(original)
+++ trunk/yt/math_utils.py	Tue Aug 24 18:56:59 2010
@@ -29,10 +29,29 @@
 import math
 
 def periodic_dist(a, b, period):
-    """
-    Find the Euclidian periodic distance between two points.
-    *a* and *b* are array-like vectors, and *period* is a float or
-    array-like value for the periodic size of the volume.
+    r"""Find the Euclidian periodic distance between two points.
+    
+    Parameters
+    ----------
+    a : array or list
+        An array or list of floats.
+    
+    b : array or list
+        An array or list of floats.
+    
+    period : float or array or list
+        If the volume is symmetrically periodic, this can be a single float,
+        otherwise an array or list of floats giving the periodic size of the
+        volume for each dimension.
+
+    Examples
+    --------
+    >>> a = na.array([0.1, 0.1, 0.1])
+    >>> b = na.array([0.9, 0.9, 0.9])
+    >>> period = 1.
+    >>> dist = periodic_dist(a, b, period)
+    >>> dist
+    0.3464102
     """
     a = na.array(a)
     b = na.array(b)
@@ -43,4 +62,302 @@
     d = na.amin(c, axis=0)**2
     return math.sqrt(d.sum())
 
-    
\ No newline at end of file
+def rotate_vector_3D(a, dim, angle):
+    r"""Rotates the elements of an array around an axis by some angle.
+    
+    Given an array of 3D vectors a, this rotates them around a coordinate axis
+    by a clockwise angle. An alternative way to think about it is the
+    coordinate axes are rotated counterclockwise, which changes the directions
+    of the vectors accordingly.
+    
+    Parameters
+    ----------
+    a : array
+        An array of 3D vectors with dimension Nx3.
+    
+    dim : integer
+        An integer giving the axis around which the vectors will be rotated.
+        (x, y, z) = (0, 1, 2).
+    
+    angle : float
+        The angle in radians through which the vectors will be rotated
+        clockwise.
+    
+    Examples
+    --------
+    >>> a = na.array([[1, 1, 0], [1, 0, 1], [0, 1, 1], [1, 1, 1], [3, 4, 5]])
+    >>> b = rotate_vector_3D(a, 2, na.pi/2)
+    >>> print b
+    [[  1.00000000e+00  -1.00000000e+00   0.00000000e+00]
+    [  6.12323400e-17  -1.00000000e+00   1.00000000e+00]
+    [  1.00000000e+00   6.12323400e-17   1.00000000e+00]
+    [  1.00000000e+00  -1.00000000e+00   1.00000000e+00]
+    [  4.00000000e+00  -3.00000000e+00   5.00000000e+00]]
+    
+    """
+    mod = False
+    if len(a.shape) == 1:
+        mod = True
+        a = na.array([a])
+    if a.shape[1] !=3:
+        raise SyntaxError("The second dimension of the array a must be == 3!")
+    if dim == 0:
+        R = na.array([[1, 0,0],
+            [0, na.cos(angle), na.sin(angle)],
+            [0, -na.sin(angle), na.cos(angle)]])
+    elif dim == 1:
+        R = na.array([[na.cos(angle), 0, -na.sin(angle)],
+            [0, 1, 0],
+            [na.sin(angle), 0, na.cos(angle)]])
+    elif dim == 2:
+        R = na.array([[na.cos(angle), na.sin(angle), 0],
+            [-na.sin(angle), na.cos(angle), 0],
+            [0, 0, 1]])
+    else:
+        raise SyntaxError("dim must be 0, 1, or 2!")
+    if mod:
+        return na.dot(R, a.T).T[0]
+    else:
+        return na.dot(R, a.T).T
+    
+
+def modify_reference_frame(CoM, L, P, V):
+    r"""Rotates and translates data into a new reference frame to make
+    calculations easier.
+    
+    This is primarily useful for calculations of halo data.
+    The data is translated into the center of mass frame.
+    Next, it is rotated such that the angular momentum vector for the data
+    is aligned with the z-axis. Put another way, if one calculates the angular
+    momentum vector on the data that comes out of this function, it will
+    always be along the positive z-axis.
+    If the center of mass is re-calculated, it will be at the origin.
+    
+    Parameters
+    ----------
+    CoM : array
+        The center of mass in 3D.
+    
+    L : array
+        The angular momentum vector.
+    
+    P : array
+        The positions of the data to be modified (i.e. particle or grid cell
+        positions). The array should be Nx3.
+    
+    V : array
+        The velocities of the data to be modified (i.e. particle or grid cell
+        velocities). The array should be Nx3.
+    
+    Returns
+    -------
+    L : array
+        The angular momentum vector equal to [0, 0, 1] modulo machine error.
+    
+    P : array
+        The modified positional data.
+    
+    V : array
+        The modified velocity data.
+    
+    Examples
+    --------
+    >>> CoM = na.array([0.5, 0.5, 0.5])
+    >>> L = na.array([1, 0, 0])
+    >>> P = na.array([[1, 0.5, 0.5], [0, 0.5, 0.5], [0.5, 0.5, 0.5], [0, 0, 0]])
+    >>> V = P.copy()
+    >>> LL, PP, VV = modify_reference_frame(CoM, L, P, V)
+    >>> LL
+    array([  6.12323400e-17,   0.00000000e+00,   1.00000000e+00])
+    >>> PP
+    array([[  3.06161700e-17,   0.00000000e+00,   5.00000000e-01],
+           [ -3.06161700e-17,   0.00000000e+00,  -5.00000000e-01],
+           [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00],
+           [  5.00000000e-01,  -5.00000000e-01,  -5.00000000e-01]])
+    >>> VV
+    array([[ -5.00000000e-01,   5.00000000e-01,   1.00000000e+00],
+           [ -5.00000000e-01,   5.00000000e-01,   3.06161700e-17],
+           [ -5.00000000e-01,   5.00000000e-01,   5.00000000e-01],
+           [  0.00000000e+00,   0.00000000e+00,   0.00000000e+00]])
+
+    """
+    if (L == na.array([0, 0, 1.])).all():
+        # Whew! Nothing to do!
+        return L, P, V
+    # First translate the positions to center of mass reference frame.
+    P = P - CoM
+    # Now find the angle between modified L and the x-axis.
+    LL = L.copy()
+    LL[2] = 0.
+    theta = na.arccos(na.inner(LL, [1.,0,0])/na.inner(LL,LL)**.5)
+    if L[1] < 0:
+        theta = -theta
+    # Now rotate all the position, velocity, and L vectors by this much around
+    # the z axis.
+    P = rotate_vector_3D(P, 2, theta)
+    V = rotate_vector_3D(V, 2, theta)
+    L = rotate_vector_3D(L, 2, theta)
+    # Now find the angle between L and the z-axis.
+    theta = na.arccos(na.inner(L, [0,0,1])/na.inner(L,L)**.5)
+    # This time we rotate around the y axis.
+    P = rotate_vector_3D(P, 1, theta)
+    V = rotate_vector_3D(V, 1, theta)
+    L = rotate_vector_3D(L, 1, theta)
+    return L, P, V
+
+def compute_rotational_velocity(CoM, L, P, V):
+    r"""Computes the rotational velocity for some data around an axis.
+    
+    This is primarily for halo computations.
+    Given some data, this computes the circular rotational velocity of each
+    point (particle) in reference to the axis defined by the angular momentum
+    vector.
+    This is accomplished by converting to the reference frame of the center
+    of mass of the halo.
+    
+    Parameters
+    ----------
+    CoM : array
+        The center of mass in 3D.
+    
+    L : array
+        The angular momentum vector.
+    
+    P : array
+        The positions of the data to be modified (i.e. particle or grid cell
+        positions). The array should be Nx3.
+    
+    V : array
+        The velocities of the data to be modified (i.e. particle or grid cell
+        velocities). The array should be Nx3.
+    
+    Returns
+    -------
+    v : array
+        An array N elements long that gives the circular rotational velocity
+        for each datum (particle).
+    
+    Examples
+    --------
+    >>> CoM = na.array([0, 0, 0])
+    >>> L = na.array([0, 0, 1])
+    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> circV = compute_rotational_velocity(CoM, L, P, V)
+    >>> circV
+    array([ 1.        ,  0.        ,  0.        ,  1.41421356])
+
+    """
+    # First we translate into the simple coordinates.
+    L, P, V = modify_reference_frame(CoM, L, P, V)
+    # Find the vector in the plane of the galaxy for each position point
+    # that is perpendicular to the radial vector.
+    radperp = na.cross([0, 0, 1], P)
+    # Find the component of the velocity along the radperp vector.
+    # Unfortunately, I don't think there's a better way to do this.
+    res = na.empty(V.shape[0], dtype='float64')
+    for i, rp in enumerate(radperp):
+        temp = na.dot(rp, V[i]) / na.dot(rp, rp) * rp
+        res[i] = na.dot(temp, temp)**0.5
+    return res
+    
+def compute_parallel_velocity(CoM, L, P, V):
+    r"""Computes the parallel velocity for some data around an axis.
+    
+    This is primarily for halo computations.
+    Given some data, this computes the velocity component along the angular
+    momentum vector.
+    This is accomplished by converting to the reference frame of the center
+    of mass of the halo.
+    
+    Parameters
+    ----------
+    CoM : array
+        The center of mass in 3D.
+    
+    L : array
+        The angular momentum vector.
+    
+    P : array
+        The positions of the data to be modified (i.e. particle or grid cell
+        positions). The array should be Nx3.
+    
+    V : array
+        The velocities of the data to be modified (i.e. particle or grid cell
+        velocities). The array should be Nx3.
+    
+    Returns
+    -------
+    v : array
+        An array N elements long that gives the parallel velocity for
+        each datum (particle).
+    
+    Examples
+    --------
+    >>> CoM = na.array([0, 0, 0])
+    >>> L = na.array([0, 0, 1])
+    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> paraV = compute_parallel_velocity(CoM, L, P, V)
+    >>> paraV
+    array([10, -1,  1, -1])
+    
+    """
+    # First we translate into the simple coordinates.
+    L, P, V = modify_reference_frame(CoM, L, P, V)
+    # And return just the z-axis velocities.
+    return V[:,2]
+
+def compute_radial_velocity(CoM, L, P, V):
+    r"""Computes the radial velocity for some data around an axis.
+    
+    This is primarily for halo computations.
+    Given some data, this computes the radial velocity component for the data.
+    This is accomplished by converting to the reference frame of the center
+    of mass of the halo.
+    
+    Parameters
+    ----------
+    CoM : array
+        The center of mass in 3D.
+    
+    L : array
+        The angular momentum vector.
+    
+    P : array
+        The positions of the data to be modified (i.e. particle or grid cell
+        positions). The array should be Nx3.
+    
+    V : array
+        The velocities of the data to be modified (i.e. particle or grid cell
+        velocities). The array should be Nx3.
+    
+    Returns
+    -------
+    v : array
+        An array N elements long that gives the radial velocity for
+        each datum (particle).
+    
+    Examples
+    --------
+    >>> CoM = na.array([0, 0, 0])
+    >>> L = na.array([0, 0, 1])
+    >>> P = na.array([[1, 0, 0], [1, 1, 1], [0, 0, 1], [1, 1, 0]])
+    >>> V = na.array([[0, 1, 10], [-1, -1, -1], [1, 1, 1], [1, -1, -1]])
+    >>> radV = compute_radial_velocity(CoM, L, P, V)
+    >>> radV
+    array([ 1.        ,  1.41421356,  0.        ,  0.        ])
+    
+    """
+    # First we translate into the simple coordinates.
+    L, P, V = modify_reference_frame(CoM, L, P, V)
+    # We find the radial velocity by dotting the velocity vector
+    # with the cylindrical radial vector for this point.
+    # Unfortunately, I don't think there's a better way to do this.
+    P[:,2] = 0
+    res = na.empty(V.shape[0], dtype='float64')
+    for i, rad in enumerate(P):
+        temp = na.dot(rad, V[i]) / na.dot(rad, rad) * rad
+        res[i] = na.dot(temp, temp)**0.5
+    return res
+

Modified: trunk/yt/ramses_reader.pyx
==============================================================================
--- trunk/yt/ramses_reader.pyx	(original)
+++ trunk/yt/ramses_reader.pyx	Tue Aug 24 18:56:59 2010
@@ -247,6 +247,7 @@
         unsigned get_cell_father()
         bint is_finest(int ison)
         int get_absolute_position()
+        int get_domain()
 
     cdef cppclass RAMSES_tree:
         # This is, strictly speaking, not a header.  But, I believe it is
@@ -374,7 +375,7 @@
     cdef RAMSES_tree** trees
     cdef RAMSES_hydro_data*** hydro_datas
 
-    cdef int *loaded
+    cdef int **loaded
 
     cdef public object field_ind
     cdef public object field_names
@@ -403,35 +404,41 @@
             local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
             self.hydro_datas[idomain - 1] = <RAMSES_hydro_data **>\
                 malloc(sizeof(RAMSES_hydro_data*) * local_hydro_data.m_nvars)
-            del local_hydro_data
             for ii in range(local_hydro_data.m_nvars):
                 self.hydro_datas[idomain - 1][ii] = \
                     new RAMSES_hydro_data(deref(local_tree))
             self.trees[idomain - 1] = local_tree
             # We do not delete anything
+            if idomain < self.rsnap.m_header.ncpu: del local_hydro_data
         # Only once, we read all the field names
         self.nfields = local_hydro_data.m_nvars
         cdef string *field_name
         self.field_names = []
         self.field_ind = {}
-        self.loaded = <int *> malloc(sizeof(int) * local_hydro_data.m_nvars)
+        self.loaded = <int **> malloc(sizeof(int) * local_hydro_data.m_nvars)
+        for idomain in range(self.ndomains):
+            self.loaded[idomain] = <int *> malloc(
+                sizeof(int) * local_hydro_data.m_nvars)
+            for ifield in range(local_hydro_data.m_nvars):
+                self.loaded[idomain][ifield] = 0
         for ifield in range(local_hydro_data.m_nvars):
             field_name = &(local_hydro_data.m_varnames[ifield])
             # Does this leak?
             self.field_names.append(field_name.c_str())
             self.field_ind[self.field_names[-1]] = ifield
-            self.loaded[ifield] = 0
         # This all needs to be cleaned up in the deallocator
+        del local_hydro_data
 
     def __dealloc__(self):
         cdef int idomain, ifield
         for idomain in range(self.ndomains):
-            for ifield in range(self.nfields):
-                if self.hydro_datas[idomain][ifield] != NULL:
-                    del self.hydro_datas[idomain][ifield]
+            #for ifield in range(self.nfields):
+            #    if self.hydro_datas[idomain][ifield] != NULL:
+            #        del self.hydro_datas[idomain][ifield]
             if self.trees[idomain] != NULL:
                 del self.trees[idomain]
             free(self.hydro_datas[idomain])
+            free(self.loaded[idomain])
         free(self.trees)
         free(self.hydro_datas)
         free(self.loaded)
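The loaded change above (int * to int **) turns the per-field flag array into
a per-domain, per-field matrix, so reading a variable on one domain no longer
marks it loaded for every other domain.  In Python terms (illustrative only):

    # before: one flag per field, shared across all domains
    loaded = [0] * nfields
    # after: one flag per (domain, field) pair
    loaded = [[0] * nfields for _ in range(ndomains)]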
@@ -445,10 +452,12 @@
         cdef RAMSES_tree *local_tree
         cdef RAMSES_hydro_data *local_hydro_data
         cdef RAMSES_level *local_level
+        cdef tree_iterator grid_it, grid_end
 
         # All the loop-local pointers must be declared up here
 
         cell_count = []
+        cdef int local_count = 0
         for ilevel in range(self.rsnap.m_header.levelmax + 1):
             cell_count.append(0)
         for idomain in range(1, self.rsnap.m_header.ncpu + 1):
@@ -457,8 +466,14 @@
             local_tree.read()
             local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
             for ilevel in range(local_tree.m_maxlevel + 1):
+                local_count = 0
                 local_level = &local_tree.m_AMR_levels[ilevel]
-                cell_count[ilevel] += local_level.size()
+                grid_it = local_tree.begin(ilevel)
+                grid_end = local_tree.end(ilevel)
+                while grid_it != grid_end:
+                    local_count += (grid_it.get_domain() == idomain)
+                    grid_it.next()
+                cell_count[ilevel] += local_count
             del local_tree, local_hydro_data
 
         return cell_count
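
count_zones now walks the level's grid iterator instead of trusting
local_level.size(), counting only grids whose get_domain() matches
idomain; the filter guards against counting grids that appear in a
domain's tree but are owned by a neighboring domain.  A minimal Python
sketch of the filter (grid objects hypothetical):

    def count_owned(grids_at_level, idomain):
        # keep only the grids this domain actually owns
        return sum(1 for g in grids_at_level if g.domain == idomain)

    grids = [type('G', (), {'domain': d})() for d in (1, 2, 1)]
    assert count_owned(grids, 1) == 2
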
@@ -467,10 +482,11 @@
         # this domain_index must be zero-indexed
         cdef int varindex = self.field_ind[varname]
         cdef string *field_name = new string(varname)
-        if self.loaded[varindex] == 1: return
-        print "READING FROM DISK", varname
+        if self.loaded[domain_index][varindex] == 1:
+            return
+        print "READING FROM DISK", varname, domain_index, varindex
         self.hydro_datas[domain_index][varindex].read(deref(field_name))
-        self.loaded[varindex] = 1
+        self.loaded[domain_index][varindex] = 1
         del field_name
 
     def clear_tree(self, char *varname, int domain_index):
@@ -478,11 +494,11 @@
         # We delete and re-create
         cdef int varindex = self.field_ind[varname]
         cdef string *field_name = new string(varname)
-        if self.loaded[varindex] == 0: return
+        if self.loaded[domain_index][varindex] == 0: return
         del self.hydro_datas[domain_index][varindex]
         self.hydro_datas[domain_index][varindex] = \
             new RAMSES_hydro_data(deref(self.trees[domain_index]))
-        self.loaded[varindex] = 0
+        self.loaded[domain_index][varindex] = 0
         del field_name
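
Together, ensure_loaded and clear_tree now behave as a lazy cache keyed
on (domain, field): ensure_loaded reads from disk only when the flag is
unset, and clear_tree evicts by deleting and re-creating the hydro data
object before clearing the flag.  A minimal Python sketch of the
pattern (names and the read callback hypothetical):

    def ensure_loaded(data, loaded, domain, field, read):
        if loaded[domain][field]:
            return                      # already resident
        data[domain][field] = read(domain, field)
        loaded[domain][field] = 1

    def clear(data, loaded, domain, field):
        if not loaded[domain][field]:
            return                      # nothing to evict
        data[domain][field] = None      # drop the payload, keep the slot
        loaded[domain][field] = 0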
 
     def get_file_info(self):
@@ -504,9 +520,20 @@
         header_info["unit_l"] = self.rsnap.m_header.unit_l
         header_info["unit_d"] = self.rsnap.m_header.unit_d
         header_info["unit_t"] = self.rsnap.m_header.unit_t
+
+        # Now we grab some from the trees
+        cdef np.ndarray[np.int32_t, ndim=1] top_grid_dims = np.zeros(3, "int32")
+        cdef int i
+        for i in range(3):
+            # Note that nx is really boundary conditions.  We always have 2.
+            #top_grid_dims[i] = self.trees[0].m_header.nx[i]
+            top_grid_dims[i] = 2
+        header_info["nx"] = top_grid_dims
+
         return header_info
 
     def fill_hierarchy_arrays(self, 
+                              np.ndarray[np.int32_t, ndim=1] top_grid_dims,
                               np.ndarray[np.float64_t, ndim=2] left_edges,
                               np.ndarray[np.float64_t, ndim=2] right_edges,
                               np.ndarray[np.int32_t, ndim=2] grid_levels,
@@ -523,7 +550,7 @@
 
         cdef tree_iterator grid_it, grid_end, father_it
         cdef vec[double] gvec
-        cdef int grid_ind = 0
+        cdef int grid_ind = 0, grid_aind = 0
         cdef unsigned parent_ind
         cdef bint ci
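
The top_grid_dims array exported by get_file_info above (hard-coded to
2, per the comment on nx) feeds the grid_half_width formula in the next
hunk: a grid at level ilevel gets half-width
1.0 / (2**ilevel * top_grid_dims[0]) in box units, so grid widths halve
with each level of refinement.  Checking the arithmetic:

    top = 2                  # top_grid_dims[0], per the hard-coded value
    halves = [1.0 / (2**ilevel * top) for ilevel in range(3)]
    assert halves == [0.5, 0.25, 0.125]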
 
@@ -540,33 +567,40 @@
             for ilevel in range(local_tree.m_maxlevel + 1):
                 # this gets overwritten for every domain, which is okay
                 level_cell_counts[ilevel] = grid_ind 
-                grid_half_width = self.rsnap.m_header.boxlen / (2**(ilevel + 1))
+                #grid_half_width = self.rsnap.m_header.boxlen / \
+                grid_half_width = 1.0 / \
+                    (2**(ilevel) * top_grid_dims[0])
                 grid_it = local_tree.begin(ilevel)
                 grid_end = local_tree.end(ilevel)
                 while grid_it != grid_end:
+                    if grid_it.get_domain() != idomain:
+                        grid_ind += 1
+                        grid_it.next()
+                        continue
                     gvec = local_tree.grid_pos_double(grid_it)
-                    left_edges[grid_ind, 0] = gvec.x - grid_half_width
-                    left_edges[grid_ind, 1] = gvec.y - grid_half_width
-                    left_edges[grid_ind, 2] = gvec.z - grid_half_width
-                    right_edges[grid_ind, 0] = gvec.x + grid_half_width
-                    right_edges[grid_ind, 1] = gvec.y + grid_half_width
-                    right_edges[grid_ind, 2] = gvec.z + grid_half_width
-                    grid_levels[grid_ind, 0] = ilevel
+                    left_edges[grid_aind, 0] = gvec.x - grid_half_width
+                    left_edges[grid_aind, 1] = gvec.y - grid_half_width
+                    left_edges[grid_aind, 2] = gvec.z - grid_half_width
+                    right_edges[grid_aind, 0] = gvec.x + grid_half_width
+                    right_edges[grid_aind, 1] = gvec.y + grid_half_width
+                    right_edges[grid_aind, 2] = gvec.z + grid_half_width
+                    grid_levels[grid_aind, 0] = ilevel
                     # Now the harder part
                     father_it = grid_it.get_parent()
-                    grid_file_locations[grid_ind, 0] = <np.int64_t> idomain
-                    grid_file_locations[grid_ind, 1] = grid_ind - level_cell_counts[ilevel]
+                    grid_file_locations[grid_aind, 0] = <np.int64_t> idomain
+                    grid_file_locations[grid_aind, 1] = grid_ind - level_cell_counts[ilevel]
                     parent_ind = father_it.get_absolute_position()
                     if ilevel > 0:
                         # We calculate the REAL parent index
-                        grid_file_locations[grid_ind, 2] = \
+                        grid_file_locations[grid_aind, 2] = \
                             level_cell_counts[ilevel - 1] + parent_ind
                     else:
-                        grid_file_locations[grid_ind, 2] = -1
+                        grid_file_locations[grid_aind, 2] = -1
                     for ci in range(8):
                         rr = <np.int32_t> grid_it.is_finest(ci)
-                        child_mask[grid_ind, ci] = rr
+                        child_mask[grid_aind, ci] = rr
                     grid_ind += 1
+                    grid_aind += 1
                     grid_it.next()
             del local_tree, local_hydro_data
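
The grid_ind / grid_aind split is the heart of the hunk above: grid_ind
advances for every grid the iterator visits, preserving the per-level
ordering used for the grid_file_locations offsets, while grid_aind
advances only for grids the current domain owns and indexes the packed
output arrays.  A minimal Python sketch of the dual-counter pattern
(data hypothetical):

    all_grids = [(1, 'a'), (2, 'b'), (1, 'c')]   # (domain, payload)
    idomain, packed = 1, []

    grid_ind = 0    # every grid seen; keeps the file-offset ordering
    grid_aind = 0   # only owned grids; indexes the packed arrays
    for domain, payload in all_grids:
        if domain != idomain:
            grid_ind += 1               # counted, never stored
            continue
        packed.append((grid_aind, grid_ind, payload))
        grid_ind += 1
        grid_aind += 1

    assert packed == [(0, 0, 'a'), (1, 2, 'c')]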
 
@@ -609,7 +643,7 @@
 
         cdef int gi, i, j, k, domain, offset
         cdef int ir, jr, kr
-        cdef int offi, offj, offk
+        cdef int offi, offj, offk, odind
         cdef np.int64_t di, dj, dk
         cdef np.ndarray[np.int64_t, ndim=1] ogrid_info
         cdef np.ndarray[np.int64_t, ndim=1] og_start_index
@@ -623,6 +657,7 @@
         for gi in range(len(component_grid_info)):
             ogrid_info = component_grid_info[gi]
             domain = ogrid_info[0]
+            #print "Loading", domain, ogrid_info
             self.ensure_loaded(field, domain - 1)
             local_tree = self.trees[domain - 1]
             local_hydro_data = self.hydro_datas[domain - 1][varindex]
@@ -666,8 +701,8 @@
     cdef public object grid_file_locations
     cdef public object dd
         
-    #@cython.boundscheck(False)
-    #@cython.wraparound(False)
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def __cinit__(self,
                    np.ndarray[np.int64_t, ndim=1] left_index,
                    np.ndarray[np.int64_t, ndim=1] dimensions, 
@@ -754,6 +789,8 @@
         #print "Efficiency is %0.3e" % (efficiency)
         self.efficiency = efficiency
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def find_split(self):
         # First look for zeros
         cdef int i, center, ax
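
The boundscheck/wraparound decorators are re-enabled on __cinit__ above
and newly added on find_split here and get_properties below, trading
Cython's per-access index checks for speed in these hot loops.  The
decorators are also valid in pure-Python mode, where they are no-ops
until the module is compiled; a minimal sketch:

    import cython

    @cython.boundscheck(False)   # skip bounds checks when compiled
    @cython.wraparound(False)    # disallow negative indexing when compiled
    def total(values):
        s = 0
        for i in range(len(values)):
            s += values[i]
        return s

    assert total([1, 2, 3]) == 6
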
@@ -768,7 +805,7 @@
             for i in range(self.dimensions[ax]):
                 if sig[i] == 0 and i > 0 and i < self.dimensions[ax] - 1:
                     #print "zero: %s (%s)" % (i, self.dimensions[ax])
-                    return 'zs', ax, i
+                    return 0, ax, i
         zcstrength = 0
         zcp = 0
         zca = -1
@@ -792,8 +829,10 @@
                         zca = ax
             free(sig2d)
         #print "zcp: %s (%s)" % (zcp, self.dimensions[ax])
-        return 'zc', ax, zcp
+        return 1, ax, zcp
 
+    @cython.boundscheck(False)
+    @cython.wraparound(False)
     def get_properties(self):
         cdef np.ndarray[np.int64_t, ndim=2] tr = np.empty((3,3), dtype='int64')
         cdef int i
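
One behavioral note on find_split: it now returns an integer code in
place of the old string tag, 0 for a split at an interior zero plane of
the signature (the flagged-cell counts projected along an axis) and 1
for a split at the strongest zero crossing of the signature's second
derivative, in the style of Berger-Rigoutsos grid splitting.  A minimal
pure-Python sketch under those assumptions (signatures precomputed,
names hypothetical):

    def find_split(signatures):
        # signatures[ax][i]: flagged cells in plane i along axis ax
        # 1) prefer an interior plane with no flagged cells
        for ax, sig in enumerate(signatures):
            for i in range(1, len(sig) - 1):
                if sig[i] == 0:
                    return 0, ax, i          # code 0: zero plane
        # 2) otherwise the strongest sign change of the 2nd derivative
        best = (0, -1, 0)                    # (strength, axis, plane)
        for ax, sig in enumerate(signatures):
            second = [sig[i-1] - 2*sig[i] + sig[i+1]
                      for i in range(1, len(sig) - 1)]
            for j in range(len(second) - 1):
                if second[j] * second[j+1] < 0:
                    strength = abs(second[j] - second[j+1])
                    if strength > best[0]:
                        best = (strength, ax, j + 2)
        return 1, best[1], best[2]           # code 1: zero crossing

    assert find_split([[3, 0, 4, 4]]) == (0, 0, 1)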


