[Yt-svn] yt-commit r1555 - in trunk: tests yt/extensions/volume_rendering yt/lagos
mturk at wrangler.dreamhost.com
Sat Dec 5 23:16:45 PST 2009
Author: mturk
Date: Sat Dec 5 23:16:43 2009
New Revision: 1555
URL: http://yt.enzotools.org/changeset/1555
Log:
Backport from mercurial of:
* Some fixes to the grid partitioner, and a sketch of the new version
* Integer-coordinate smoothed covering grid, which is more reliable and of better quality (still disabled for ghost-zone generation)
* Test for smoothed covering grid
* Stephen's check on Particle IDs and his mpi_exit function
* Britton's fix for particle field uniqueness
Modified:
trunk/tests/test_lagos.py
trunk/yt/extensions/volume_rendering/grid_partitioner.py
trunk/yt/lagos/BaseDataTypes.py
trunk/yt/lagos/EnzoDefs.py
trunk/yt/lagos/HaloFinding.py
trunk/yt/lagos/ParallelTools.py
trunk/yt/lagos/ParticleIO.py
Modified: trunk/tests/test_lagos.py
==============================================================================
--- trunk/tests/test_lagos.py (original)
+++ trunk/tests/test_lagos.py Sat Dec 5 23:16:43 2009
@@ -313,7 +313,21 @@
self.assertFalse(na.any(na.isnan(cg["Temperature"])))
self.assertFalse(na.any(cg["Temperature"]==-999))
-
+class TestIntSmoothedCoveringGrid(LagosTestingBase, unittest.TestCase):
+ def setUp(self):
+ LagosTestingBase.setUp(self)
+
+ def testCoordinates(self):
+ # We skip the first grid because it has ghost zones on all sides
+ for g in self.hierarchy.grids[1:]:
+ LE = g.LeftEdge - g.dds
+ level = g.Level
+ dims = g.ActiveDimensions + 2
+ g1 = self.hierarchy.si_covering_grid(level, LE, dims)
+ g2 = g.retrieve_ghost_zones(1, ["x","y","z"], smoothed=False)
+ for field in 'xyz':
+ diff = na.abs((g1[field] - g2[field])/(g1[field] + g2[field]))
+ self.assertAlmostEqual(diff.max(), 0.0, places=14)
class TestDataCube(LagosTestingBase, unittest.TestCase):
def setUp(self):
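For context, the new test compares the coordinate fields of an si_covering_grid against retrieve_ghost_zones using a symmetric relative difference. A minimal standalone sketch of that comparison pattern, with plain numpy arrays standing in for the two grids' fields:

    import numpy as np
    import unittest

    class TestFieldAgreement(unittest.TestCase):
        def test_relative_difference(self):
            # Stand-ins for g1[field] and g2[field] in the diff above.
            a = np.linspace(0.1, 1.0, 64).reshape(4, 4, 4)
            b = a.copy()  # the two grids should agree to roundoff
            # Symmetric relative difference, as in testCoordinates.
            diff = np.abs((a - b) / (a + b))
            # assertAlmostEqual's third argument is decimal *places*
            # (an int), so a ~1e-14 tolerance is written as places=14.
            self.assertAlmostEqual(diff.max(), 0.0, places=14)

    if __name__ == "__main__":
        unittest.main()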
Modified: trunk/yt/extensions/volume_rendering/grid_partitioner.py
==============================================================================
--- trunk/yt/extensions/volume_rendering/grid_partitioner.py (original)
+++ trunk/yt/extensions/volume_rendering/grid_partitioner.py Sat Dec 5 23:16:43 2009
@@ -27,7 +27,7 @@
from yt.funcs import *
import h5py
-from yt.utils import PartitionedGrid
+from yt.amr_utils import PartitionedGrid
def partition_grid(start_grid, field, log_field = True, threshold = None):
if threshold is not None:
@@ -69,7 +69,7 @@
grids = []
cim = grid.child_index_mask
for xs, xe in zip(x_vert[:-1], x_vert[1:]):
- for ys, ye in zip(y_vert[:-1:-1], y_vert[1::-1]):
+ for ys, ye in zip(y_vert[:-1], y_vert[1:]):
for zs, ze in zip(z_vert[:-1], z_vert[1:]):
sl = (slice(xs, xe), slice(ys, ye), slice(zs, ze))
dd = cim[sl]
@@ -150,19 +150,17 @@
self.dims = dims
cv = []
self._cim = cim_base
- for vertex in source_vertices:
- if na.any(vertex - source_offset >= 0) or \
- na.any(vertex - source_offset < dims):
- cv.append(vertex)
- self.child_vertices = na.array(cv)
+ self.child_vertices = source_vertices
@property
def cim(self):
+ return self._cim[self.sl]
+
+ @property
+ def sl(self):
sls = self.source_offset
sle = self.source_offset + self.dims
- return self._cim[sls[0]:sle[0],
- sls[1]:sle[1],
- sls[2]:sle[2]]
+ return tuple([slice(sls[i], sle[i]) for i in range(3)])
def split(self, axis, coord):
dims_left = self.dims.copy()
@@ -224,22 +222,27 @@
left_region, right_region = region.split(axis, split_coord)
lrc = na.unique(left_region.cim)
rrc = na.unique(right_region.cim)
- if lrc.size > 1 and lrc[0] == -1:
- #print axis, split_coord, "Splitting left region", lrc
- left_region = partition_region(left_region, (axis + 1) % 3)
- elif lrc.size > 1 and lrc[0] > -1:
- left_region = []
+ if lrc.size > 1:
+ if lrc[0] == -1:
+ left_region = partition_region(left_region, (axis + 1) % 3)
+ if lrc[0] > -1:
+ left_region = []
#print axis, split_coord, "Not splitting left region", lrc
else:
- left_region = [left_region]
-
- if rrc.size > 1 and rrc[0] == -1:
- #print axis, split_coord, "Splitting right region", rrc
- right_region = partition_region(right_region, (axis + 1) % 3)
- elif rrc.size > 1 and rrc[0] > -1:
- right_region = []
- #print axis, split_coord, "Not splitting right region", rrc
+ if lrc[0] == -1:
+ left_region = [left_region]
+ else:
+ left_region = []
+
+ if rrc.size > 1:
+ if rrc[0] == -1:
+ right_region = partition_region(right_region, (axis + 1) % 3)
+ if rrc[0] > -1:
+ right_region = []
else:
- right_region = [right_region]
+ if rrc[0] == -1:
+ right_region = [right_region]
+ else:
+ right_region = []
return left_region + right_region
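The branch restructuring above preserves the partitioner's recursion: a region whose child_index_mask mixes values is split along alternating axes until every piece is either entirely uncovered (-1 throughout) or entirely owned by a child grid. A stripped-down sketch of that recursion over a bare numpy mask; the names are illustrative, not the module's API, and a midpoint split stands in for the child-vertex split the real code uses:

    import numpy as np

    def partition(mask, axis=0):
        # Split `mask` until each piece is homogeneous; return a list of
        # (offset, shape) pairs for pieces with no children (mask == -1),
        # mirroring the keep/discard/recurse logic in the diff above.
        vals = np.unique(mask)
        if vals.size == 1:
            return [((0, 0, 0), mask.shape)] if vals[0] == -1 else []
        mid = mask.shape[axis] // 2
        lo = [slice(None)] * 3
        hi = [slice(None)] * 3
        lo[axis] = slice(0, mid)
        hi[axis] = slice(mid, None)
        pieces = []
        for sl, base in ((lo, 0), (hi, mid)):
            for off, shape in partition(mask[tuple(sl)], (axis + 1) % 3):
                off = list(off)
                off[axis] += base
                pieces.append((tuple(off), shape))
        return pieces

    # An 8^3 mask in which one octant is covered by child grid 0.
    cim = -np.ones((8, 8, 8), dtype=np.int32)
    cim[:4, :4, :4] = 0
    print(partition(cim))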
Modified: trunk/yt/lagos/BaseDataTypes.py
==============================================================================
--- trunk/yt/lagos/BaseDataTypes.py (original)
+++ trunk/yt/lagos/BaseDataTypes.py Sat Dec 5 23:16:43 2009
@@ -2395,7 +2395,6 @@
continue
mylog.debug("Getting field %s from %s possible grids",
field, len(self._grids))
- self[field] = na.zeros(self.ActiveDimensions, dtype='float64') -999
if self._use_pbar: pbar = \
get_pbar('Searching grids for values ', len(self._grids))
# How do we find out the root grid base dx?
@@ -2567,14 +2566,107 @@
return self.right_edge
class AMRIntSmoothedCoveringGridBase(AMRCoveringGridBase):
- _skip_add = True
+ _type_name = "si_covering_grid"
+ @wraps(AMRCoveringGridBase.__init__)
+ def __init__(self, *args, **kwargs):
+ AMRCoveringGridBase.__init__(self, *args, **kwargs)
+ self._final_start_index = self.global_startindex
+
def _get_list_of_grids(self):
buffer = self.pf.h.select_grids(0)[0].dds
- AMRCoveringGridBase._get_list_of_grids(buffer)
+ AMRCoveringGridBase._get_list_of_grids(self, buffer)
self._grids = self._grids[::-1]
- def _get_data_from_grid(self, grid, fields):
- pass
+ def get_data(self, field=None):
+ dx = [self.pf.h.select_grids(l)[0].dds for l in range(self.level+1)]
+ self._get_list_of_grids()
+ # We don't generate coordinates here.
+ if field is None:
+ fields_to_get = self.fields
+ else:
+ fields_to_get = ensure_list(field)
+ for field in fields_to_get:
+ grid_count = 0
+ if self.data.has_key(field):
+ continue
+ mylog.debug("Getting field %s from %s possible grids",
+ field, len(self._grids))
+ if self._use_pbar: pbar = \
+ get_pbar('Searching grids for values ', len(self._grids))
+ # Note that, thanks to some trickery, we have different dimensions
+ # on the field than one might think from looking at the dx and the
+ # L/R edges.
+ # We jump-start our task here
+ self._update_level_state(0, field)
+ for level in range(self.level+1):
+ for grid in self.select_grids(level):
+ if self._use_pbar: pbar.update(grid_count)
+ self._get_data_from_grid(grid, field, level)
+ grid_count += 1
+ if level < self.level:
+ self._update_level_state(level + 1)
+ self._refine(1, field)
+ if self.level > 0:
+ self[field] = self[field][1:-1,1:-1,1:-1]
+ if self._use_pbar: pbar.finish()
+
+ def _update_level_state(self, level, field = None):
+ dx = self.pf.h.select_grids(level)[0].dds
+ for ax, v in zip('xyz', dx): self['cd%s'%ax] = v
+ self._old_global_startindex = self.global_startindex
+ self.global_startindex = na.array(na.floor(1.000001*self.left_edge / dx) - 1, dtype='int64')
+ self.domain_width = na.rint((self.pf["DomainRightEdge"] -
+ self.pf["DomainLeftEdge"])/dx).astype('int64')
+ if level == 0 and self.level > 0:
+ # We use one grid cell at LEAST, plus one buffer on all sides
+ idims = na.ceil((self.right_edge-self.left_edge)/dx) + 2
+ self[field] = na.zeros(idims,dtype='float64')-999
+ elif level == 0 and self.level == 0:
+ self.global_startindex = na.array(na.floor(self.left_edge / dx), dtype='int64')
+ idims = na.ceil((self.right_edge-self.left_edge)/dx)
+ self[field] = na.zeros(idims,dtype='float64')-999
+
+ def _refine(self, dlevel, field):
+ rf = float(self.pf["RefineBy"]**dlevel)
+
+ old_dims = na.array(self[field].shape) - 1
+ old_left = (self._old_global_startindex + 0.5) * rf
+ old_right = rf*old_dims + old_left
+ old_bounds = [old_left[0], old_right[0],
+ old_left[1], old_right[1],
+ old_left[2], old_right[2]]
+
+ dx = na.array([self['cd%s' % ax] for ax in 'xyz'], dtype='float64')
+ new_dims = na.ceil(0.999999999 * (self.right_edge-self.left_edge)/dx) + 2
+
+ # x, y, z are the new bounds
+ x,y,z = (na.mgrid[0:new_dims[0], 0:new_dims[1], 0:new_dims[2]]
+ ).astype('float64') + 0.5
+ x += self.global_startindex[0]
+ y += self.global_startindex[1]
+ z += self.global_startindex[2]
+ fake_grid = {'x':x,'y':y,'z':z}
+
+ interpolator = TrilinearFieldInterpolator(
+ self[field], old_bounds, ['x','y','z'],
+ truncate = True)
+ self[field] = interpolator(fake_grid)
+
+ def _get_data_from_grid(self, grid, fields, level):
+ fields = ensure_list(fields)
+ g_fields = [grid[field] for field in fields]
+ c_fields = [self[field] for field in fields]
+ count = PointCombine.FillRegion(1,
+ grid.get_global_startindex(), self.global_startindex,
+ c_fields, g_fields,
+ na.array(self[fields[0]].shape, dtype='int32'),
+ grid.ActiveDimensions,
+ grid.child_mask, self.domain_width, 1, 0)
+ return count
+
+ def flush_data(self, *args, **kwargs):
+ raise KeyError("Can't do this")
+
def _reconstruct_object(*args, **kwargs):
pfid = args[0]
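The heart of _refine is a change of index space: coarse cell centers are rescaled by the refinement factor ((old_start + 0.5) * rf) and the partially filled field is interpolated onto the finer cell centers, with truncate=True clamping at the boundaries. A one-dimensional analogue of that arithmetic, with np.interp standing in for TrilinearFieldInterpolator; all names are local to this sketch:

    import numpy as np

    rf = 2                    # RefineBy ** dlevel
    old_start = 3             # coarse-level global start index
    coarse = np.array([1.0, 2.0, 4.0, 8.0])  # field on coarse cells

    # Coarse cell centers expressed in fine index units, matching
    # old_left = (self._old_global_startindex + 0.5) * rf above.
    old_centers = (old_start + 0.5) * rf + rf * np.arange(coarse.size)

    # Fine cell centers spanning the same region.
    new_start = old_start * rf
    new_centers = new_start + 0.5 + np.arange(coarse.size * rf)

    # np.interp clamps outside the sample range, like truncate=True.
    fine = np.interp(new_centers, old_centers, coarse)
    print(fine)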
Modified: trunk/yt/lagos/EnzoDefs.py
==============================================================================
--- trunk/yt/lagos/EnzoDefs.py (original)
+++ trunk/yt/lagos/EnzoDefs.py Sat Dec 5 23:16:43 2009
@@ -52,6 +52,7 @@
parameterDict = {"CosmologyCurrentRedshift": float,
"CosmologyComovingBoxSize": float,
"CosmologyOmegaMatterNow": float,
+ "CosmologyOmegaLambdaNow": float,
"CosmologyHubbleConstantNow": float,
"CosmologyInitialRedshift": float,
"DualEnergyFormalismEta1": float,
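parameterDict maps each Enzo parameter name to the Python type used to cast its value when the parameter file is read, so the one-line addition above is what makes CosmologyOmegaLambdaNow come through as a float rather than a raw string. A minimal sketch of that parsing pattern; the file contents below are invented:

    parameterDict = {"CosmologyOmegaMatterNow": float,
                     "CosmologyOmegaLambdaNow": float,
                     "CosmologyCurrentRedshift": float}

    lines = ["CosmologyOmegaMatterNow  = 0.3",
             "CosmologyOmegaLambdaNow  = 0.7"]

    parameters = {}
    for line in lines:
        if "=" not in line:
            continue
        name, value = (s.strip() for s in line.split("=", 1))
        if name in parameterDict:
            # Cast using the registered type, e.g. float("0.7") -> 0.7
            parameters[name] = parameterDict[name](value)
    print(parameters)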
Modified: trunk/yt/lagos/HaloFinding.py
==============================================================================
--- trunk/yt/lagos/HaloFinding.py (original)
+++ trunk/yt/lagos/HaloFinding.py Sat Dec 5 23:16:43 2009
@@ -599,6 +599,16 @@
def _run_finder(self):
yt_counters("Reading Data")
+ # Test to make sure the particle IDs aren't suspicious.
+ exit = False
+ if (self.particle_fields["particle_index"] < 0).any():
+ mylog.error("Negative values in particle_index field. Parallel HOP will fail.")
+ exit = True
+ if na.unique(self.particle_fields["particle_index"]).size != \
+ self.particle_fields["particle_index"].size:
+ mylog.error("Non-unique values in particle_index field. Parallel HOP will fail.")
+ exit = True
+ self._mpi_exit_test(exit)
obj = RunParallelHOP(self.period, self.padding,
self.num_neighbors, self.bounds,
self.particle_fields["particle_position_x"],
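The new guard catches two ways particle IDs can be corrupt before parallel HOP consumes them: negative indices and duplicated indices. The same two tests in standalone numpy, with an intentionally bad array for illustration:

    import numpy as np

    particle_index = np.array([0, 1, 2, 2, 5], dtype=np.int64)

    exit = False
    if (particle_index < 0).any():
        print("Negative values in particle_index field.")
        exit = True
    if np.unique(particle_index).size != particle_index.size:
        print("Non-unique values in particle_index field.")
        exit = True
    # In the diff above, `exit` is then handed to _mpi_exit_test so
    # that every MPI task aborts together instead of hanging.
    print("would abort:", exit)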
Modified: trunk/yt/lagos/ParallelTools.py
==============================================================================
--- trunk/yt/lagos/ParallelTools.py (original)
+++ trunk/yt/lagos/ParallelTools.py Sat Dec 5 23:16:43 2009
@@ -778,6 +778,13 @@
mylog.debug("Opening MPI Barrier on %s", MPI.COMM_WORLD.rank)
MPI.COMM_WORLD.Barrier()
+ def _mpi_exit_test(self, data=False):
+ # data==True -> exit. data==False -> no exit
+ statuses = self._mpi_info_dict(data)
+ if True in statuses.values():
+ raise RuntimeError("Fatal error. Exiting.")
+ return None
+
@parallel_passthrough
def _mpi_catdict(self, data):
field_keys = data.keys()
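_mpi_exit_test is a collective "did anyone fail?" check: each task reports a boolean, the statuses are shared among all tasks, and every task raises together if any one of them saw a problem, which avoids deadlocking ranks that would otherwise wait in a later collective call. A hedged mpi4py sketch of the same idea; this is an illustration, not the yt method itself (which goes through _mpi_info_dict):

    from mpi4py import MPI

    def mpi_exit_test(local_failure):
        # Every rank contributes its status; allgather gives each rank
        # the full list, so the decision below is identical everywhere.
        statuses = MPI.COMM_WORLD.allgather(bool(local_failure))
        if any(statuses):
            # All ranks raise together, so no task is left blocked in
            # a later collective operation.
            raise RuntimeError("Fatal error. Exiting.")

    if __name__ == "__main__":
        # Example: pretend rank 0 detected a problem.
        mpi_exit_test(MPI.COMM_WORLD.rank == 0)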
Modified: trunk/yt/lagos/ParticleIO.py
==============================================================================
--- trunk/yt/lagos/ParticleIO.py (original)
+++ trunk/yt/lagos/ParticleIO.py Sat Dec 5 23:16:43 2009
@@ -100,6 +100,7 @@
conv_factors.append(na.ones(len(grid_list), dtype='float64'))
else:
to_add = f.get_dependencies(pf = self.pf).requested
+ to_add = list(na.unique(to_add))
if len(to_add) != 1: raise KeyError
fields_to_read += to_add
conv_factors.append(
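Britton's one-liner deduplicates a field's dependency list before the single-dependency check, since requested can legitimately name the same field more than once. Note that na.unique also sorts its input; with exactly one surviving element that is harmless. A tiny standalone illustration with invented inputs:

    import numpy as np

    to_add = ["particle_position_x", "particle_position_x"]
    to_add = list(np.unique(to_add))
    if len(to_add) != 1:
        raise KeyError("expected exactly one dependency, got %s" % to_add)
    print(to_add)  # ['particle_position_x']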