[Yt-svn] yt: 2 new changesets

hg at spacepope.org
Thu Aug 12 16:35:25 PDT 2010


hg Repository: yt
details:   yt/rev/33526550e8c6
changeset: 1945:33526550e8c6
user:      Matthew Turk <matthewturk at gmail.com>
date:      Thu Aug 12 16:34:51 2010 -0700
description:
Fixes for RAMSES, including for multi-domain datasets.  Some segfaults
and slowness remain.

hg Repository: yt
details:   yt/rev/85a25c76e403
changeset: 1946:85a25c76e403
user:      Matthew Turk <matthewturk at gmail.com>
date:      Thu Aug 12 16:35:20 2010 -0700
description:
Merging

diffstat:

 yt/lagos/GadgetFields.py  |  86 +++++++++++++++++++++++++++++++++++++++++++
 yt/lagos/HierarchyType.py |  82 ++++++++++++++++++++++++++++++++++------
 yt/lagos/OutputTypes.py   |   2 +-
 yt/ramses_reader.pyx      |  90 +++++++++++++++++++++++++++++---------------
 4 files changed, 215 insertions(+), 45 deletions(-)

diffs (truncated from 460 to 300 lines):

diff -r 64ee81fb9b62 -r 85a25c76e403 yt/lagos/GadgetFields.py
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/yt/lagos/GadgetFields.py	Thu Aug 12 16:35:20 2010 -0700
@@ -0,0 +1,86 @@
+"""
+Gadget-specific fields
+
+Author: Christopher E Moody <juxtaposicion at gmail.com>
+Affiliation: UC Santa Cruz
+Homepage: http://yt.enzotools.org/
+License:
+  Copyright (C) 2010 Christopher E Moody, Matthew Turk.  All Rights Reserved.
+
+  This file is part of yt.
+
+  yt is free software; you can redistribute it and/or modify
+  it under the terms of the GNU General Public License as published by
+  the Free Software Foundation; either version 3 of the License, or
+  (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful,
+  but WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+  GNU General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+
+from UniversalFields import *
+class GadgetFieldContainer(CodeFieldInfoContainer):
+    _shared_state = {}
+    _field_list = {}
+GadgetFieldInfo = GadgetFieldContainer()
+add_gadget_field = GadgetFieldInfo.add_field
+
+add_field = add_gadget_field
+
+translation_dict = {"Position": "POS",
+                    "Velocity": "VEL",
+                    "ID": "ID",
+                    "Mass":"MASS"
+                   }
+
+for f,v in translation_dict.items():
+    if v not in GadgetFieldInfo:
+        add_field(f, function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("POS")],
+          units=r"\rm{cm}")
+          
+add_field("Density", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("POS")],
+          units=r"\rm{cm}")
+
+add_field("VEL", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("VEL")],
+          units=r"\rm{cm}/\rm{s}")
+
+add_field("ID", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("ID")],
+          units=r"")
+
+add_field("MASS", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("MASS")],
+          units=r"\rm{g}")
+
+add_field("U", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("U")],
+          units=r"")
+
+add_field("NE", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("NE")],
+          units=r"")
+
+add_field("POT", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("POT")],
+          units=r"")
+
+add_field("ACCE", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("ACCE")],
+          units=r"")
+
+add_field("ENDT", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("ENDT")],
+          units=r"")
+
+add_field("TSTP", function=lambda a,b: None, take_log=True,
+          validators = [ValidateDataField("TSTP")],
+          units=r"")
+
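
The GadgetFields.py hunk above registers placeholder fields: each add_field
call records metadata (the on-disk block name to validate against, units,
take_log), while the lambda returning None marks the field as read straight
from the Gadget file rather than derived.  Below is a minimal standalone
sketch of that registration pattern; FieldRegistry and its methods are
hypothetical stand-ins for yt's CodeFieldInfoContainer, used only to
illustrate the shared-state registry, and this loop attaches each entry's own
block name (the committed loop above reuses the "POS" validator and \rm{cm}
units for every translated field).

    # Hypothetical stand-in for CodeFieldInfoContainer: every instance shares
    # one state dict (Borg pattern), so add_field calls accumulate globally.
    class FieldRegistry:
        _shared_state = {}
        def __init__(self):
            self.__dict__ = self._shared_state
            self._field_list = self.__dict__.setdefault("_field_list", {})
        def add_field(self, name, function=None, take_log=True,
                      validators=None, units=""):
            # A placeholder function (lambda a, b: None) means "read this
            # block from the snapshot"; only the metadata is stored here.
            self._field_list[name] = dict(function=function, take_log=take_log,
                                          validators=validators or [],
                                          units=units)

    registry = FieldRegistry()
    translation_dict = {"Position": "POS", "Velocity": "VEL",
                        "ID": "ID", "Mass": "MASS"}
    for yt_name, disk_name in translation_dict.items():
        registry.add_field(yt_name, function=lambda a, b: None,
                           validators=[disk_name], units=r"\rm{cm}")
    print(sorted(registry._field_list))  # ['ID', 'Mass', 'Position', 'Velocity']
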
diff -r 64ee81fb9b62 -r 85a25c76e403 yt/lagos/HierarchyType.py
--- a/yt/lagos/HierarchyType.py	Tue Aug 10 21:17:57 2010 -0700
+++ b/yt/lagos/HierarchyType.py	Thu Aug 12 16:35:20 2010 -0700
@@ -1665,7 +1665,13 @@
             self.pf["TopGridDimensions"],
             ogrid_left_edge, ogrid_right_edge,
             ogrid_levels, ogrid_file_locations, ochild_masks)
-        import pdb;pdb.set_trace()
+        # Now we can rescale
+        mi, ma = ogrid_left_edge.min(), ogrid_right_edge.max()
+        DL = self.pf["DomainLeftEdge"]
+        DR = self.pf["DomainRightEdge"]
+        ogrid_left_edge = (ogrid_left_edge - mi)/(ma - mi) * (DR - DL) + DL
+        ogrid_right_edge = (ogrid_right_edge - mi)/(ma - mi) * (DR - DL) + DL
+        #import pdb;pdb.set_trace()
         # We now have enough information to run the patch coalescing 
         self.proto_grids = []
         for level in xrange(len(level_info)):
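
For reference, the rescaling that replaces the pdb breakpoint above maps the
reader's raw octree edges linearly onto [DomainLeftEdge, DomainRightEdge]
before any regridding.  A small numpy sketch of the same transform, with
made-up edge values and scalar domain bounds standing in for the pf
parameters:

    import numpy as na  # the hierarchy code aliases numpy as na

    # Two illustrative octree grids in the reader's raw coordinates.
    ogrid_left_edge  = na.array([[0.5, 0.5, 0.5], [1.0, 1.0, 1.0]])
    ogrid_right_edge = na.array([[1.0, 1.0, 1.0], [1.5, 1.5, 1.5]])
    DL, DR = 0.0, 1.0  # stand-ins for pf["DomainLeftEdge"/"DomainRightEdge"]

    mi, ma = ogrid_left_edge.min(), ogrid_right_edge.max()
    scaled_left  = (ogrid_left_edge  - mi) / (ma - mi) * (DR - DL) + DL
    scaled_right = (ogrid_right_edge - mi) / (ma - mi) * (DR - DL) + DL
    # scaled_left now starts at DL (0.0) and scaled_right ends at DR (1.0)
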
@@ -1673,19 +1679,65 @@
             ggi = (ogrid_levels == level).ravel()
             mylog.info("Re-gridding level %s: %s octree grids", level, ggi.sum())
             nd = self.pf["TopGridDimensions"] * 2**level
-            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd).astype('int64')
-            right_index = left_index + 2
             dims = na.ones((ggi.sum(), 3), dtype='int64') * 2
             fl = ogrid_file_locations[ggi,:]
             # Now our initial protosubgrid
-            initial_left = na.zeros(3, dtype='int64')
-            idims = na.ones(3, dtype='int64') * nd
             #if level == 6: raise RuntimeError
-            psg = ramses_reader.ProtoSubgrid(initial_left, idims,
-                            left_index, right_index, dims, fl)
-            self.proto_grids.append(self._recursive_patch_splitting(
-                    psg, idims, initial_left, 
-                    left_index, right_index, dims, fl))
+            # We want grids that cover no more than MAX_EDGE cells in every direction
+            MAX_EDGE = 128
+            psgs = []
+            left_index = na.rint((ogrid_left_edge[ggi,:]) * nd).astype('int64')
+            right_index = left_index + 2
+            lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]
+            #lefts = zip(*[l.ravel() for l in lefts])
+            pbar = get_pbar("Re-gridding ", lefts[0].size)
+            min_ind = na.min(left_index, axis=0)
+            max_ind = na.max(right_index, axis=0)
+            for i,dli in enumerate(lefts[0]):
+                pbar.update(i)
+                if min_ind[0] > dli + nd[0]: continue
+                if max_ind[0] < dli: continue
+                idim = min(nd[0] - dli, MAX_EDGE)
+                gdi = ((dli  <= right_index[:,0])
+                     & (dli + idim >= left_index[:,0]))
+                if not na.any(gdi): continue
+                for dlj in lefts[1]:
+                    if min_ind[1] > dlj + nd[1]: continue
+                    if max_ind[1] < dlj: continue
+                    idim = min(nd[1] - dlj, MAX_EDGE)
+                    gdj = ((dlj  <= right_index[:,1])
+                         & (dlj + idim >= left_index[:,1])
+                         & (gdi))
+                    if not na.any(gdj): continue
+                    for dlk in lefts[2]:
+                        if min_ind[2] > dlk + nd[2]: continue
+                        if max_ind[2] < dlk: continue
+                        idim = min(nd[2] - dlk, MAX_EDGE)
+                        gdk = ((dlk  <= right_index[:,2])
+                             & (dlk + idim >= left_index[:,2])
+                             & (gdj))
+                        if not na.any(gdk): continue
+                        left = na.array([dli, dlj, dlk])
+                        domain_left = left.ravel()
+                        initial_left = na.zeros(3, dtype='int64') + domain_left
+                        idims = na.ones(3, dtype='int64') * na.minimum(nd - domain_left, MAX_EDGE)
+                        # We want to find how many grids are inside.
+                        dleft_index = left_index[gdk,:]
+                        dright_index = right_index[gdk,:]
+                        ddims = dims[gdk,:]
+                        dfl = fl[gdk,:]
+                        psg = ramses_reader.ProtoSubgrid(initial_left, idims,
+                                        dleft_index, dright_index, ddims, dfl)
+                        #print "Gridding from %s to %s + %s" % (
+                        #    initial_left, initial_left, idims)
+                        if psg.efficiency <= 0: continue
+                        self.num_deep = 0
+                        psgs.extend(self._recursive_patch_splitting(
+                            psg, idims, initial_left, 
+                            dleft_index, dright_index, ddims, dfl))
+                        #psgs.extend([psg])
+            pbar.finish()
+            self.proto_grids.append(psgs)
             sums = na.zeros(3, dtype='int64')
             mylog.info("Final grid count: %s", len(self.proto_grids[level]))
             if len(self.proto_grids[level]) == 1: continue
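
The regridding loop above tiles each level into bricks of at most MAX_EDGE
cells per axis and, for each brick, keeps only the octree grids whose index
range overlaps it before building a ProtoSubgrid.  A standalone sketch of
that overlap test along the x axis, with illustrative values (two 2-cell
grids on a 256^3 level):

    import numpy as na

    MAX_EDGE = 128
    nd = na.array([256, 256, 256])                         # level dimensions
    left_index  = na.array([[10, 10, 10], [200, 40, 180]])
    right_index = left_index + 2                           # octree grids are 2^3

    lefts = [na.mgrid[0:nd[i]:MAX_EDGE] for i in range(3)]  # [0, 128] per axis
    for dli in lefts[0]:
        idim = min(nd[0] - dli, MAX_EDGE)
        # keep grids whose x extent overlaps the brick [dli, dli + idim]
        gdi = (dli <= right_index[:, 0]) & (dli + idim >= left_index[:, 0])
        print(dli, gdi)
    # -> the brick at x=0 picks up the first grid, the brick at x=128 the second
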
@@ -1694,12 +1746,16 @@
             assert(na.all(sums == dims.prod(axis=1).sum()))
         self.num_grids = sum(len(l) for l in self.proto_grids)
 
-    #num_deep = 0
+    num_deep = 0
 
-    #@num_deep_inc
+    @num_deep_inc
     def _recursive_patch_splitting(self, psg, dims, ind,
             left_index, right_index, gdims, fl):
-        min_eff = 0.2 # This isn't always respected.
+        min_eff = 0.1 # This isn't always respected.
+        if self.num_deep > 40:
+            # If we've recursed more than 40 times, we give up.
+            psg.efficiency = min_eff
+            return [psg]
         if psg.efficiency > min_eff or psg.efficiency < 0.0:
             return [psg]
         tt, ax, fp = psg.find_split()
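
The last HierarchyType.py hunk re-enables the num_deep bookkeeping so that
patch splitting gives up (accepting the current patch at min_eff) once the
recursion gets too deep.  num_deep_inc itself is defined elsewhere in
yt/lagos and is not shown in this diff; a plausible minimal version, included
here only so the depth cap reads self-contained and not necessarily the
actual yt implementation, might look like:

    def num_deep_inc(func):
        # Track recursion depth on the hierarchy object around each call.
        def wrapper(self, *args, **kwargs):
            self.num_deep += 1
            try:
                return func(self, *args, **kwargs)
            finally:
                self.num_deep -= 1
        return wrapper
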
diff -r 64ee81fb9b62 -r 85a25c76e403 yt/lagos/OutputTypes.py
--- a/yt/lagos/OutputTypes.py	Tue Aug 10 21:17:57 2010 -0700
+++ b/yt/lagos/OutputTypes.py	Thu Aug 12 16:35:20 2010 -0700
@@ -978,7 +978,7 @@
         self.parameters["DomainRightEdge"] = na.ones(3, dtype='float64') \
                                            * rheader['boxlen']
         self.parameters["DomainLeftEdge"] = na.zeros(3, dtype='float64')
-        self.parameters["TopGridDimensions"] = rheader["nx"]
+        self.parameters["TopGridDimensions"] = na.ones(3, dtype='int32') * 2
 
     @classmethod
     def _is_valid(self, *args, **kwargs):
diff -r 64ee81fb9b62 -r 85a25c76e403 yt/ramses_reader.pyx
--- a/yt/ramses_reader.pyx	Tue Aug 10 21:17:57 2010 -0700
+++ b/yt/ramses_reader.pyx	Thu Aug 12 16:35:20 2010 -0700
@@ -247,6 +247,7 @@
         unsigned get_cell_father()
         bint is_finest(int ison)
         int get_absolute_position()
+        int get_domain()
 
     cdef cppclass RAMSES_tree:
         # This is, strictly speaking, not a header.  But, I believe it is
@@ -374,7 +375,7 @@
     cdef RAMSES_tree** trees
     cdef RAMSES_hydro_data*** hydro_datas
 
-    cdef int *loaded
+    cdef int **loaded
 
     cdef public object field_ind
     cdef public object field_names
@@ -414,25 +415,30 @@
         cdef string *field_name
         self.field_names = []
         self.field_ind = {}
-        self.loaded = <int *> malloc(sizeof(int) * local_hydro_data.m_nvars)
+        self.loaded = <int **> malloc(sizeof(int) * local_hydro_data.m_nvars)
+        for idomain in range(self.ndomains):
+            self.loaded[idomain] = <int *> malloc(
+                sizeof(int) * local_hydro_data.m_nvars)
+            for ifield in range(local_hydro_data.m_nvars):
+                self.loaded[idomain][ifield] = 0
         for ifield in range(local_hydro_data.m_nvars):
             field_name = &(local_hydro_data.m_varnames[ifield])
             # Does this leak?
             self.field_names.append(field_name.c_str())
             self.field_ind[self.field_names[-1]] = ifield
-            self.loaded[ifield] = 0
         # This all needs to be cleaned up in the deallocator
         del local_hydro_data
 
     def __dealloc__(self):
         cdef int idomain, ifield
         for idomain in range(self.ndomains):
-            for ifield in range(self.nfields):
-                if self.hydro_datas[idomain][ifield] != NULL:
-                    del self.hydro_datas[idomain][ifield]
+            #for ifield in range(self.nfields):
+            #    if self.hydro_datas[idomain][ifield] != NULL:
+            #        del self.hydro_datas[idomain][ifield]
             if self.trees[idomain] != NULL:
                 del self.trees[idomain]
             free(self.hydro_datas[idomain])
+            free(self.loaded[idomain])
         free(self.trees)
         free(self.hydro_datas)
         free(self.loaded)
@@ -446,10 +452,12 @@
         cdef RAMSES_tree *local_tree
         cdef RAMSES_hydro_data *local_hydro_data
         cdef RAMSES_level *local_level
+        cdef tree_iterator grid_it, grid_end
 
         # All the loop-local pointers must be declared up here
 
         cell_count = []
+        cdef int local_count = 0
         for ilevel in range(self.rsnap.m_header.levelmax + 1):
             cell_count.append(0)
         for idomain in range(1, self.rsnap.m_header.ncpu + 1):
@@ -458,8 +466,14 @@
             local_tree.read()
             local_hydro_data = new RAMSES_hydro_data(deref(local_tree))
             for ilevel in range(local_tree.m_maxlevel + 1):
+                local_count = 0
                 local_level = &local_tree.m_AMR_levels[ilevel]
-                cell_count[ilevel] += local_level.size()
+                grid_it = local_tree.begin(ilevel)
+                grid_end = local_tree.end(ilevel)
+                while grid_it != grid_end:
+                    local_count += (grid_it.get_domain() == idomain)
+                    grid_it.next()
+                cell_count[ilevel] += local_count
             del local_tree, local_hydro_data
 
         return cell_count
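
The per-level counting change above makes each CPU tally only the grids it
owns: a domain's tree can also hold grids belonging to neighbouring domains,
so summing local_level.size() directly over-counts.  A plain-Python sketch of
the same idea, with a made-up grids_by_level structure standing in for the
RAMSES tree iterator:

    # Hypothetical per-level grid lists; "domain" mimics grid_it.get_domain().
    grids_by_level = {
        0: [{"domain": 1}, {"domain": 2}],
        1: [{"domain": 1}, {"domain": 1}, {"domain": 2}],
    }
    idomain = 1
    cell_count = []
    for ilevel, grids in sorted(grids_by_level.items()):
        local_count = sum(g["domain"] == idomain for g in grids)
        cell_count.append(local_count)
    print(cell_count)  # [1, 2] -- only domain 1's grids are counted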


