[yt-svn] commit/yt: 5 new changesets
commits-noreply at bitbucket.org
Fri Jan 31 12:06:36 PST 2014
5 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/0c1f956d8ae1/
Changeset: 0c1f956d8ae1
Branch: yt-3.0
User: bcrosby
Date: 2014-01-21 21:02:17
Summary: Trying to sort out why particle masses are not converted properly when using the Enzo frontend for inline analysis
Affected #: 1 file
diff -r 5edcb35f319179e22925db71ef7adf5122595282 -r 0c1f956d8ae175e7c2cf9620f39306be969d2025 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -305,8 +305,10 @@
if mask is None: continue
for field in field_list:
data = self.grids_in_memory[g.id][field]
+ '''
if field in _convert_mass:
data *= g.dds.prod(dtype="f8")
+ '''
yield (ptype, field), data[mask]
@property
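The hunk above temporarily disables the conversion with a triple-quoted string while the cause is tracked down. The symptom being chased (per the follow-up commit) is that an in-place conversion writes through to the live in-memory Enzo array, so repeated inline-analysis passes compound the conversion. A hypothetical, self-contained illustration (the array and volume values are made up):

    import numpy as np

    particle_mass = np.array([1.0, 2.0])  # stands in for self.grids_in_memory[g.id][field]
    cell_volume = 0.125                   # stands in for g.dds.prod(dtype="f8")

    for _ in range(3):                    # three successive inline-analysis passes
        data = particle_mass
        data *= cell_volume               # in-place: mutates the Enzo array itself

    print particle_mass                   # [ 0.00195312  0.00390625] -- converted three times instead of once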
https://bitbucket.org/yt_analysis/yt/commits/af8b570e113c/
Changeset: af8b570e113c
Branch: yt-3.0
User: bcrosby
Date: 2014-01-27 20:16:02
Summary: Mass conversion in Enzo frontend now modifies a copy of the data rather than modifying the data in place. This prevents the Enzo particle mass from being changed every time yt is called.
Affected #: 1 file
diff -r 0c1f956d8ae175e7c2cf9620f39306be969d2025 -r af8b570e113cc3d267e12d7804546d2de82d04b1 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -292,7 +292,7 @@
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
- for chunk in chunks: # These should be organized by grid filename
+ for chunk in chunks:
for g in chunk.objs:
if g.id not in self.grids_in_memory: continue
nap = sum(g.NumberOfActiveParticles.values())
@@ -305,10 +305,10 @@
if mask is None: continue
for field in field_list:
data = self.grids_in_memory[g.id][field]
- '''
+
if field in _convert_mass:
- data *= g.dds.prod(dtype="f8")
- '''
+ data = data * g.dds.prod(dtype="f8")
+
yield (ptype, field), data[mask]
@property
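The fix binds data to a freshly allocated product (data = data * ...) instead of multiplying in place, so the in-memory Enzo array is left untouched. A minimal sketch of the difference, using hypothetical values:

    import numpy as np

    enzo_mass = np.array([1.0, 2.0])  # the live in-memory array
    dv = 0.125

    data = enzo_mass
    data = data * dv                  # new array; enzo_mass stays [1.0, 2.0]
    print data is enzo_mass           # False -- the original is left alone

    data = enzo_mass
    data *= dv                        # old behaviour: writes through to enzo_mass
    print data is enzo_mass           # True -- enzo_mass is now [0.125, 0.25]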
https://bitbucket.org/yt_analysis/yt/commits/2f7c23912e53/
Changeset: 2f7c23912e53
Branch: yt-3.0
User: bcrosby
Date: 2014-01-27 20:18:47
Summary: merged
Affected #: 3 files
diff -r af8b570e113cc3d267e12d7804546d2de82d04b1 -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -126,14 +126,12 @@
cdef SelectorObject selector = selection_routines.AlwaysSelector(None)
cdef OctVisitorData data
obj.setup_data(&data, -1)
- assert(ref_mask.shape[0] / 8.0 == <int>(ref_mask.shape[0]/8.0))
- obj.allocate_domains([ref_mask.shape[0] / 8.0])
cdef int i, j, k, n
data.global_index = -1
data.level = 0
- # This is not something I terribly like, but it needs to be done.
- data.oref = 1
- data.nz = 8
+ assert(ref_mask.shape[0] / float(data.nz) ==
+ <int>(ref_mask.shape[0]/float(data.nz)))
+ obj.allocate_domains([ref_mask.shape[0] / data.nz])
cdef np.float64_t pos[3], dds[3]
# This dds is the oct-width
for i in range(3):
@@ -173,8 +171,7 @@
pos[1] += dds[1]
pos[0] += dds[0]
obj.nocts = cur.n_assigned
- if obj.nocts * 8 != ref_mask.size:
- print "SOMETHING WRONG", ref_mask.size, obj.nocts, obj.oref
+ if obj.nocts * data.nz != ref_mask.size:
raise KeyError(ref_mask.size, obj.nocts, obj.oref,
obj.partial_coverage)
return obj
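The rewritten assertion and allocation replace the hard-coded 8 with data.nz, so the load path no longer assumes an over-refinement factor of 1. A rough sketch of the arithmetic being checked (the mask length and nz value here are hypothetical; nz is 8 when oref == 1, matching the removed hard-coding):

    import numpy as np

    ref_mask = np.zeros(128, dtype="uint8")  # hypothetical refinement mask
    nz = 8                                   # stands in for data.nz
    assert ref_mask.shape[0] % nz == 0       # equivalent to the float comparison above
    n_octs = ref_mask.shape[0] // nz         # the count handed to allocate_domains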
diff -r af8b570e113cc3d267e12d7804546d2de82d04b1 -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -202,15 +202,22 @@
o.file_ind = nfinest[0]
o.domain = 1
nfinest[0] += 1
- elif arr[data.index] == 1:
+ elif arr[data.index] > 0:
+ if arr[data.index] != 1 and arr[data.index] != 8:
+ print "ARRAY CLUE: ", arr[data.index], "UNKNOWN"
+ raise RuntimeError
if o.children == NULL:
o.children = <Oct **> malloc(sizeof(Oct *) * 8)
for i in range(8):
o.children[i] = NULL
- o.children[ii] = &octs[nocts[0]]
- o.children[ii].domain_ind = nocts[0]
- o.children[ii].file_ind = -1
- o.children[ii].domain = -1
- o.children[ii].children = NULL
- nocts[0] += 1
+ for i in range(arr[data.index]):
+ o.children[ii + i] = &octs[nocts[0]]
+ o.children[ii + i].domain_ind = nocts[0]
+ o.children[ii + i].file_ind = -1
+ o.children[ii + i].domain = -1
+ o.children[ii + i].children = NULL
+ nocts[0] += 1
+ else:
+ print "SOMETHING IS AMISS", data.index
+ raise RuntimeError
data.index += 1
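The visitor now accepts any positive ref_mask entry (guarded to 1 or 8) and attaches that many children starting at slot ii, rather than exactly one. A loose pure-Python rendering of that branch, with hypothetical names:

    def fill_children(children, ii, count, next_domain_ind):
        # count plays the role of arr[data.index]; the guard above expects 1 or 8
        for i in range(count):
            children[ii + i] = dict(domain_ind=next_domain_ind, file_ind=-1,
                                    domain=-1, children=None)
            next_domain_ind += 1
        return next_domain_ind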
diff -r af8b570e113cc3d267e12d7804546d2de82d04b1 -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -12,6 +12,8 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import md5
+import cPickle
import itertools as it
import numpy as np
import importlib
@@ -514,3 +516,87 @@
[44,48,48],
],
]
+
+def check_results(func):
+ r"""This is a decorator for a function to verify that the (numpy ndarray)
+ result of a function is what it should be.
+
+ This function is designed to be used for very light answer testing.
+ Essentially, it wraps around a larger function that returns a numpy array,
+ and that has results that should not change. It is not necessarily used
+ inside the testing scripts themselves, but inside testing scripts written
+ by developers during the testing of pull requests and new functionality.
+ If a hash is specified, it "wins" and the others are ignored. Otherwise,
+ tolerance is 1e-8 (just above single precision.)
+
+ The correct results will be stored if the command line contains
+ --answer-reference , and otherwise it will compare against the results on
+ disk. The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME
+ is the name of the function being tested.
+
+ This will raise an exception if the results are not correct.
+
+ Examples
+ --------
+
+ @check_results
+ def my_func(pf):
+ return pf.domain_width
+
+ my_func(pf)
+ """
+ def compute_results(func):
+ def _func(*args, **kwargs):
+ name = kwargs.pop("result_basename", func.func_name)
+ rv = func(*args, **kwargs)
+ if hasattr(rv, "convert_to_cgs"):
+ rv.convert_to_cgs()
+ _rv = rv.ndarray_view()
+ else:
+ _rv = rv
+ mi = _rv.min()
+ ma = _rv.max()
+ st = _rv.std(dtype="float64")
+ su = _rv.sum(dtype="float64")
+ si = _rv.size
+ ha = md5.md5(_rv.tostring()).hexdigest()
+ fn = "func_results_ref_%s.cpkl" % (name)
+ with open(fn, "wb") as f:
+ cPickle.dump( (mi, ma, st, su, si, ha), f)
+ return rv
+ return _func
+ from yt.mods import unparsed_args
+ if "--answer-reference" in unparsed_args:
+ return compute_results(func)
+
+ def compare_results(func):
+ def _func(*args, **kwargs):
+ name = kwargs.pop("result_basename", func.func_name)
+ rv = func(*args, **kwargs)
+ if hasattr(rv, "convert_to_cgs"):
+ rv.convert_to_cgs()
+ _rv = rv.ndarray_view()
+ else:
+ _rv = rv
+ vals = (_rv.min(),
+ _rv.max(),
+ _rv.std(dtype="float64"),
+ _rv.sum(dtype="float64"),
+ _rv.size,
+ md5.md5(_rv.tostring()).hexdigest() )
+ fn = "func_results_ref_%s.cpkl" % (name)
+ if not os.path.exists(fn):
+ print "Answers need to be created with --answer-reference ."
+ return False
+ with open(fn, "rb") as f:
+ ref = cPickle.load(f)
+ print "Sizes: %s (%s, %s)" % (vals[4] == ref[4], vals[4], ref[4])
+ assert_allclose(vals[0], ref[0], 1e-8, err_msg="min")
+ assert_allclose(vals[1], ref[1], 1e-8, err_msg="max")
+ assert_allclose(vals[2], ref[2], 1e-8, err_msg="std")
+ assert_allclose(vals[3], ref[3], 1e-8, err_msg="sum")
+ assert_equal(vals[4], ref[4])
+ print "Hashes equal: %s" % (vals[-1] == ref[-1])
+ return rv
+ return _func
+ return compare_results(func)
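The check_results decorator stores (min, max, std, sum, size, md5) of a function's array result when yt is invoked with --answer-reference, and compares later runs against that pickle. A hedged usage sketch, assuming a loaded parameter file pf and a 'density' field as in the docstring:

    from yt.testing import check_results

    @check_results
    def density_values(pf):
        return pf.h.all_data()["density"]

    # Reference pass:  python my_script.py --answer-reference
    #                  (writes func_results_ref_density_values.cpkl)
    # Later passes:    python my_script.py
    #                  (compares min/max/std/sum/size and reports whether the hashes match)
    density_values(pf)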
https://bitbucket.org/yt_analysis/yt/commits/02b6ca6a162f/
Changeset: 02b6ca6a162f
Branch: yt-3.0
User: bcrosby
Date: 2014-01-31 20:36:01
Summary: merged
Affected #: 8 files
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -30,6 +30,7 @@
yt/utilities/lib/fortran_reader.c
yt/utilities/lib/freetype_writer.c
yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/image_utilities.c
yt/utilities/lib/Interpolators.c
yt/utilities/lib/kdtree.c
yt/utilities/lib/mesh_utilities.c
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -617,17 +617,6 @@
mylog.info("Re-writing halo %d" % halo['id'])
self._write_profile(profile, filename, format='%0.6e')
- if newProfile:
- # Temporary solution to memory leak.
- for g in self.pf.h.grids:
- g.clear_data()
- sphere.clear_data()
- del sphere
- # Currently, this seems to be the only way to prevent large
- # halo profiling runs from running out of ram.
- # It would be good to track down the real cause at some point.
- gc.collect()
-
return profile
def _get_halo_sphere(self, halo):
@@ -637,7 +626,6 @@
"""
sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
- #if len(sphere._grids) == 0: return None
new_sphere = False
if self.recenter:
@@ -663,11 +651,6 @@
new_sphere = True
if new_sphere:
- # Temporary solution to memory leak.
- for g in self.pf.h.grids:
- g.clear_data()
- sphere.clear_data()
- del sphere
sphere = self.pf.h.sphere(halo['center'], halo['r_max']/self.pf.units['mpc'])
if self._need_bulk_velocity:
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -317,7 +317,7 @@
coords[:] = self.Level
return coords
- def tcoords(self, dobj):
+ def select_tcoords(self, dobj):
dt, t = dobj.selector.get_dt(self)
return dt, t
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -123,6 +123,17 @@
for i, sl in slicer:
yield sl, mask[:,:,:,i]
+ def select_tcoords(self, dobj):
+ # These will not be pre-allocated, which can be a problem for speed and
+ # memory usage.
+ dts, ts = [], []
+ for sl, mask in self.select_blocks(dobj.selector):
+ sl.child_mask = mask
+ dt, t = dobj.selector.get_dt(sl)
+ dts.append(dt)
+ ts.append(t)
+ return np.concatenate(dts), np.concatenate(ts)
+
@property
def domain_ind(self):
if self._domain_ind is None:
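The new octree select_tcoords gathers per-block dt/t arrays in Python lists and concatenates once at the end; as the comment notes, nothing is pre-allocated. A hypothetical sketch of that pattern next to a pre-allocated alternative (the kind the grid path in geometry_handler uses) for when the total size is known up front:

    import numpy as np

    def gather(block_results):
        # block_results yields (dt, t) pairs, standing in for selector.get_dt(sl)
        dts, ts = [], []
        for dt, t in block_results:
            dts.append(dt)
            ts.append(t)
        return np.concatenate(dts), np.concatenate(ts)

    def gather_prealloc(block_results, total):
        dts = np.empty(total, dtype="float64")
        ts = np.empty(total, dtype="float64")
        ind = 0
        for dt, t in block_results:
            dts[ind:ind + dt.size] = dt
            ts[ind:ind + t.size] = t
            ind += dt.size
        return dts, ts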
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/data_objects/unstructured_mesh.py
--- a/yt/data_objects/unstructured_mesh.py
+++ b/yt/data_objects/unstructured_mesh.py
@@ -122,7 +122,7 @@
if mask is None: return np.empty(0, dtype='int32')
return ind[mask]
- def tcoords(self, dobj):
+ def select_tcoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None: return np.empty(0, dtype='float64')
dt, t = dobj.selector.get_dt_mesh(self, mask.sum(), self._index_offset)
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -568,6 +568,7 @@
else:
tr = func(self)
if self._cache:
+
setattr(self, n, tr)
return tr
return property(cached_func)
@@ -591,6 +592,10 @@
for obj in self.objs:
f = getattr(obj, mname)
arrs.append(f(self.dobj))
+ if method == "dtcoords":
+ arrs = [arr[0] for arr in arrs]
+ elif method == "tcoords":
+ arrs = [arr[1] for arr in arrs]
arrs = np.concatenate(arrs)
self.data_size = arrs.shape[0]
return arrs
@@ -656,7 +661,7 @@
if self.data_size == 0: return cdt
ind = 0
for obj in self.objs:
- gdt, gt = obj.tcoords(self.dobj)
+ gdt, gt = obj.select_tcoords(self.dobj)
if gt.shape == 0: continue
ct[ind:ind+gt.size] = gt
cdt[ind:ind+gdt.size] = gdt
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/geometry/selection_routines.pyx
--- a/yt/geometry/selection_routines.pyx
+++ b/yt/geometry/selection_routines.pyx
@@ -1213,6 +1213,20 @@
return 1
return 0
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ cdef int select_cell(self, np.float64_t pos[3],
+ np.float64_t dds[3]) nogil:
+ # This is terribly inefficient for Octrees. For grids, it will never
+ # get called.
+ cdef int i
+ cdef np.float64_t left_edge[3], right_edge[3]
+ for i in range(3):
+ left_edge[i] = pos[i] - dds[i]/2.0
+ right_edge[i] = pos[i] + dds[i]/2.0
+ return self.select_bbox(left_edge, right_edge)
+
def _hash_vals(self):
return (self.p1[0], self.p1[1], self.p1[2],
self.p2[0], self.p2[1], self.p2[2],
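The select_cell fallback treats pos as the cell center and dds as the cell width, builds the cell's bounding box, and defers to select_bbox. In plain Python the geometry reduces to:

    def cell_bbox(pos, dds):
        # pos: cell center; dds: cell width along each axis
        left_edge = [pos[i] - 0.5 * dds[i] for i in range(3)]
        right_edge = [pos[i] + 0.5 * dds[i] for i in range(3)]
        return left_edge, right_edge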
diff -r 2f7c23912e530e5649d338c5b3c4d637bbdb9bec -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -534,6 +534,12 @@
disk. The filename will be func_results_ref_FUNCNAME.cpkl where FUNCNAME
is the name of the function being tested.
+ If you would like more control over the name of the pickle file the results
+ are stored in, you can pass the result_basename keyword argument to the
+ function you are testing. The check_results decorator will use the value
+ of the keyword to construct the filename of the results data file. If
+ result_basename is not specified, the name of the testing function is used.
+
This will raise an exception if the results are not correct.
Examples
@@ -544,6 +550,13 @@
return pf.domain_width
my_func(pf)
+
+ @check_results
+ def field_checker(dd, field_name):
+ return dd[field_name]
+
+ field_checker(pf.h.all_data(), 'density', result_basename='density')
+
"""
def compute_results(func):
def _func(*args, **kwargs):
https://bitbucket.org/yt_analysis/yt/commits/917374ee07f8/
Changeset: 917374ee07f8
Branch: yt-3.0
User: bcrosby
Date: 2014-01-31 21:00:03
Summary: Cleaned up whitespace changes
Affected #: 1 file
diff -r 02b6ca6a162f0929bbc5256a557c75eb8f59b1f9 -r 917374ee07f8f7685e0130a253559ef10ed9380c yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -292,7 +292,7 @@
def _read_particle_fields(self, chunks, ptf, selector):
chunks = list(chunks)
- for chunk in chunks:
+ for chunk in chunks: # These should be organized by grid filename
for g in chunk.objs:
if g.id not in self.grids_in_memory: continue
nap = sum(g.NumberOfActiveParticles.values())
@@ -305,10 +305,8 @@
if mask is None: continue
for field in field_list:
data = self.grids_in_memory[g.id][field]
-
if field in _convert_mass:
data = data * g.dds.prod(dtype="f8")
-
yield (ptype, field), data[mask]
@property
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.