[yt-svn] commit/yt: 2 new changesets
commits-noreply at bitbucket.org
Wed Mar 6 06:12:22 PST 2013
2 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/1626fc1a3603/
changeset: 1626fc1a3603
branch: yt
user: ngoldbaum
date: 2013-03-05 21:12:54
summary: Making the timestamp callback a bit more flexible.
affected #: 2 files
diff -r a06f8e4569925dfe4b21d2cc9f8ab6a27be6523f -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 yt/utilities/physical_constants.py
--- a/yt/utilities/physical_constants.py
+++ b/yt/utilities/physical_constants.py
@@ -63,6 +63,7 @@
# time
sec_per_Gyr = 31.5576e15
sec_per_Myr = 31.5576e12
+sec_per_kyr = 31.5576e9
sec_per_year = 31.5576e6 # "IAU Style Manual" by G.A. Wilkins, Comm. 5, in IAU Transactions XXB (1989)
sec_per_day = 86400.0
sec_per_hr = 3600.0
diff -r a06f8e4569925dfe4b21d2cc9f8ab6a27be6523f -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -40,6 +40,11 @@
y_dict, y_names, \
axis_names, \
axis_labels
+from yt.utilities.physical_constants import \
+ sec_per_Gyr, sec_per_Myr, \
+ sec_per_kyr, sec_per_year, \
+ sec_per_day, sec_per_hr
+
import _MPL
callback_registry = {}
@@ -1204,15 +1209,18 @@
'min': 60.0,
'minute': 60.0,
'minutes': 60.0,
- 'h': 3600.0,
- 'hour': 3600.0,
- 'hours': 3600.0,
- 'd': 86400.0,
- 'day': 86400.0,
- 'days': 86400.0,
- 'y': 86400.0*365.25,
- 'year': 86400.0*365.25,
- 'years': 86400.0*365.25,
+ 'h': sec_per_hr,
+ 'hour': sec_per_hr,
+ 'hours': sec_per_hr,
+ 'd': sec_per_day,
+ 'day': sec_per_day,
+ 'days': sec_per_day,
+ 'y': sec_per_year,
+ 'year': sec_per_year,
+ 'years': sec_per_year,
+ 'kyr': sec_per_kyr,
+ 'myr': sec_per_Myr,
+ 'gyr': sec_per_Gyr,
'ev': 1e-9 * 7.6e-8 / 6.03,
'kev': 1e-12 * 7.6e-8 / 6.03,
'mev': 1e-15 * 7.6e-8 / 6.03,
@@ -1230,13 +1238,14 @@
self.bbox_dict = bbox_dict
else:
self.bbox_dict = self._bbox_dict
- self.kwargs = {'color': 'w'}
+ self.kwargs = {'color': 'k'}
self.kwargs.update(kwargs)
def __call__(self, plot):
if self.units is None:
t = plot.data.pf.current_time * plot.data.pf['Time']
- scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's']
+ scale_keys = ['as', 'fs', 'ps', 'ns', 'us', 'ms', 's',
+ 'hour', 'day', 'year', 'kyr', 'myr', 'gyr']
self.units = 's'
for k in scale_keys:
if t < self._time_conv[k]:
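
For readers trying the change: the callback now understands kyr/myr/gyr unit keys, auto-selects among them when units=None, and defaults to black text instead of white. A minimal sketch of exercising it (the dataset path and the annotate_timestamp convenience name are illustrative assumptions, not part of this commit):

>>> from yt.mods import load, SlicePlot
>>> pf = load("DD0010/moving7_0010")      # hypothetical dataset path
>>> p = SlicePlot(pf, "z", "Density")
>>> p.annotate_timestamp(units="myr")     # or leave units=None to auto-pick from as ... gyr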
https://bitbucket.org/yt_analysis/yt/commits/60ea751d9671/
changeset: 60ea751d9671
branch: yt
user: ngoldbaum
date: 2013-03-05 21:19:24
summary: Merging to tip.
affected #: 47 files
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -837,16 +837,11 @@
cd $YT_DIR
( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
-echo "Building Fortran kD-tree module."
-cd yt/utilities/kdtree
-( make 2>&1 ) 1>> ${LOG_FILE}
-cd ../../..
-
echo "Installing yt"
echo $HDF5_DIR > hdf5.cfg
[ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
[ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd $MY_PWD
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,14 +4,61 @@
import sys
import time
import subprocess
+import shutil
+import glob
import distribute_setup
distribute_setup.use_setuptools()
from distutils.command.build_py import build_py
from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
from numpy.distutils import log
from distutils import version
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+
+class BuildForthon(Command):
+
+ """Command for building Forthon modules"""
+
+ description = "Build Forthon modules"
+ user_options = []
+
+ def initialize_options(self):
+
+ """init options"""
+
+ pass
+
+ def finalize_options(self):
+
+ """finalize options"""
+
+ pass
+
+ def run(self):
+
+ """runner"""
+ Forthon_exe = find_executable("Forthon")
+ gfortran_exe = find_executable("gfortran")
+
+ if None in (Forthon_exe, gfortran_exe):
+ sys.stderr.write(
+ "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+ )
+ return
+
+ cwd = os.getcwd()
+ os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+ cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+ "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+ "fKD_source.f90"]
+ subprocess.check_call(cmd, shell=False)
+ shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+ os.chdir(cwd)
+
REASON_FILES = []
REASON_DIRS = [
"",
@@ -36,7 +83,7 @@
files = []
for ext in ["js", "html", "css", "png", "ico", "gif"]:
files += glob.glob("%s/*.%s" % (dir_name, ext))
- REASON_FILES.append( (dir_name, files) )
+ REASON_FILES.append((dir_name, files))
# Verify that we have Cython installed
try:
@@ -93,10 +140,10 @@
language=extension.language, cplus=cplus,
output_file=target_file)
cython_result = Cython.Compiler.Main.compile(source,
- options=options)
+ options=options)
if cython_result.num_errors != 0:
- raise DistutilsError("%d errors while compiling %r with Cython" \
- % (cython_result.num_errors, source))
+ raise DistutilsError("%d errors while compiling %r with Cython"
+ % (cython_result.num_errors, source))
return target_file
@@ -109,7 +156,9 @@
VERSION = "2.5dev"
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+if os.path.exists('MANIFEST'):
+ os.remove('MANIFEST')
+
def get_mercurial_changeset_id(target_dir):
"""adapted from a script by Jason F. Harris, published at
@@ -123,11 +172,11 @@
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
-
+
if (get_changeset.stderr.read() != ""):
print "Error in obtaining current changeset of the Mercurial repository"
changeset = None
-
+
changeset = get_changeset.stdout.read().strip()
if (not re.search("^[0-9a-f]{12}", changeset)):
print "Current changeset of the Mercurial repository is malformed"
@@ -135,12 +184,26 @@
return changeset
+
+class my_build_src(build_src.build_src):
+ def run(self):
+ self.run_command("build_forthon")
+ build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+ def run(self):
+ self.distribution.data_files.append(
+ ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+ )
+ np_install_data.install_data.run(self)
+
class my_build_py(build_py):
def run(self):
# honor the --dry-run flag
if not self.dry_run:
- target_dir = os.path.join(self.build_lib,'yt')
- src_dir = os.getcwd()
+ target_dir = os.path.join(self.build_lib, 'yt')
+ src_dir = os.getcwd()
changeset = get_mercurial_changeset_id(src_dir)
self.mkpath(target_dir)
with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
@@ -148,6 +211,7 @@
build_py.run(self)
+
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
@@ -158,7 +222,7 @@
quiet=True)
config.make_config_py()
- #config.make_svn_version_py()
+ # config.make_svn_version_py()
config.add_subpackage('yt', 'yt')
config.add_scripts("scripts/*")
@@ -176,25 +240,25 @@
+ "simulations, focusing on Adaptive Mesh Refinement data "
"from Enzo, Orion, FLASH, and others.",
classifiers=["Development Status :: 5 - Production/Stable",
- "Environment :: Console",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: GNU General Public License (GPL)",
- "Operating System :: MacOS :: MacOS X",
- "Operating System :: POSIX :: AIX",
- "Operating System :: POSIX :: Linux",
- "Programming Language :: C",
- "Programming Language :: Python",
- "Topic :: Scientific/Engineering :: Astronomy",
- "Topic :: Scientific/Engineering :: Physics",
- "Topic :: Scientific/Engineering :: Visualization"],
- keywords='astronomy astrophysics visualization ' + \
- 'amr adaptivemeshrefinement',
+ "Environment :: Console",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: POSIX :: AIX",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: C",
+ "Programming Language :: Python",
+ "Topic :: Scientific/Engineering :: Astronomy",
+ "Topic :: Scientific/Engineering :: Physics",
+ "Topic :: Scientific/Engineering :: Visualization"],
+ keywords='astronomy astrophysics visualization ' +
+ 'amr adaptivemeshrefinement',
entry_points={'console_scripts': [
- 'yt = yt.utilities.command_line:run_main',
- ],
- 'nose.plugins.0.10': [
- 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
- ]
+ 'yt = yt.utilities.command_line:run_main',
+ ],
+ 'nose.plugins.0.10': [
+ 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+ ]
},
author="Matthew J. Turk",
author_email="matthewturk at gmail.com",
@@ -203,8 +267,9 @@
configuration=configuration,
zip_safe=False,
data_files=REASON_FILES,
- cmdclass = {'build_py': my_build_py},
- )
+ cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+ 'build_src': my_build_src, 'install_data': my_install_data},
+ )
return
if __name__ == '__main__':
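
The net effect of the setup.py changes is that the Fortran kD-tree build becomes optional: setup.py detects Forthon and gfortran and skips fKDpy.so rather than failing. A self-contained sketch of that detect-or-skip check, mirroring BuildForthon.run above (illustrative, not the full command class):

import sys
from distutils.spawn import find_executable

def can_build_forthon():
    # Require both Forthon and gfortran on PATH, as BuildForthon.run does.
    if None in (find_executable("Forthon"), find_executable("gfortran")):
        sys.stderr.write("fKDpy.so won't be built due to missing Forthon/gfortran\n")
        return False
    return True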
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -244,8 +244,9 @@
If True, use dynamic load balancing to create the projections.
Default: False.
- Getting the Nearest Galaxies
- ----------------------------
+ Notes
+ -----
+
The light ray tool will use the HaloProfiler to calculate the
distance and mass of the nearest halo to that pixel. In order
to do this, a dictionary called halo_profiler_parameters is used
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/enzofof_merger_tree.py
@@ -454,8 +454,8 @@
halonum : int
Halo number at the last output to trace.
- Output
- ------
+ Returns
+ -------
output : dict
Dictionary of redshifts, cycle numbers, and halo numbers
of the most massive progenitor. keys = {redshift, cycle,
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/analysis_modules/halo_merger_tree/merger_tree.py
--- a/yt/analysis_modules/halo_merger_tree/merger_tree.py
+++ b/yt/analysis_modules/halo_merger_tree/merger_tree.py
@@ -758,17 +758,19 @@
def query(self, string):
r"""Performs a query of the database and returns the results as a list
- of tuple(s), even if the result is singular.
+ of tuples, even if the result is singular.
Parameters
----------
- string : String
+
+ string : str
The SQL query of the database.
Examples
- -------
+ --------
+
>>> results = mtc.query("SELECT GlobalHaloID from Halos where SnapHaloID = 0 and \
- ... SnapZ = 0;")
+ ... SnapZ = 0;")
"""
# Query the database and return a list of tuples.
if string is None:
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -430,8 +430,8 @@
After all the calls to `add_profile`, this will trigger the actual
calculations and output the profiles to disk.
- Paramters
- ---------
+ Parameters
+ ----------
filename : str
If set, a file will be written with all of the filtered halos
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -60,9 +60,9 @@
Initialize an EmissivityIntegrator object.
- Keyword Parameters
- ------------------
- filename: string
+ Parameters
+ ----------
+ filename: string, default None
Path to data file containing emissivity values. If None,
a file called xray_emissivity.h5 is used. This file contains
emissivity tables for primordial elements and for metals at
@@ -146,8 +146,8 @@
e_min: float
the maximum energy in keV for the energy band.
- Keyword Parameters
- ------------------
+ Other Parameters
+ ----------------
filename: string
Path to data file containing emissivity values. If None,
a file called xray_emissivity.h5 is used. This file contains
@@ -220,8 +220,8 @@
e_min: float
the maximum energy in keV for the energy band.
- Keyword Parameters
- ------------------
+ Other Parameters
+ ----------------
filename: string
Path to data file containing emissivity values. If None,
a file called xray_emissivity.h5 is used. This file contains
@@ -277,8 +277,8 @@
e_min: float
the maximum energy in keV for the energy band.
- Keyword Parameters
- ------------------
+ Other Parameters
+ ----------------
filename: string
Path to data file containing emissivity values. If None,
a file called xray_emissivity.h5 is used. This file contains
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -178,7 +178,7 @@
self.child_mask = 1
self.ActiveDimensions = self.field_data['x'].shape
self.DW = grid.pf.domain_right_edge - grid.pf.domain_left_edge
-
+
def __getitem__(self, field):
if field not in self.field_data.keys():
if field == "RadiusCode":
@@ -543,7 +543,7 @@
# generated it above. This way, fields that are grabbed from the
# grids are sorted properly.
self[field] = self[field][self._sortkey]
-
+
class AMROrthoRayBase(AMR1DData):
"""
This is an orthogonal ray cast through the entire domain, at a specific
@@ -686,9 +686,9 @@
vs = self._get_line_at_coord(RE[:,i], i)
p = p | ( ( (LE[:,i1] <= vs[:,i1]) & (RE[:,i1] >= vs[:,i1]) ) \
& ( (LE[:,i2] <= vs[:,i2]) & (RE[:,i2] >= vs[:,i2]) ) )
- p = p | ( np.all( LE <= self.start_point, axis=1 )
+ p = p | ( np.all( LE <= self.start_point, axis=1 )
& np.all( RE >= self.start_point, axis=1 ) )
- p = p | ( np.all( LE <= self.end_point, axis=1 )
+ p = p | ( np.all( LE <= self.end_point, axis=1 )
& np.all( RE >= self.end_point, axis=1 ) )
self._grids = self.hierarchy.grids[p]
@@ -708,7 +708,7 @@
if not iterable(gf):
gf = gf * np.ones(grid.child_mask.shape)
return gf[mask]
-
+
@cache_mask
def _get_cut_mask(self, grid):
mask = np.zeros(grid.ActiveDimensions, dtype='int')
@@ -751,11 +751,11 @@
--------
>>> from yt.visualization.api import Streamlines
- >>> streamlines = Streamlines(pf, [0.5]*3)
+ >>> streamlines = Streamlines(pf, [0.5]*3)
>>> streamlines.integrate_through_volume()
>>> stream = streamlines.path(0)
>>> matplotlib.pylab.semilogy(stream['t'], stream['Density'], '-x')
-
+
"""
_type_name = "streamline"
_con_args = ('positions')
@@ -788,16 +788,16 @@
@restore_grid_state
def _get_data_from_grid(self, grid, field):
# No child masking here; it happens inside the mask cut
- mask = self._get_cut_mask(grid)
+ mask = self._get_cut_mask(grid)
if field == 'dts': return self._dts[grid.id]
if field == 't': return self._ts[grid.id]
return grid[field].flat[mask]
-
+
@cache_mask
def _get_cut_mask(self, grid):
#pdb.set_trace()
points_in_grid = np.all(self.positions > grid.LeftEdge, axis=1) & \
- np.all(self.positions <= grid.RightEdge, axis=1)
+ np.all(self.positions <= grid.RightEdge, axis=1)
pids = np.where(points_in_grid)[0]
mask = np.zeros(points_in_grid.sum(), dtype='int')
dts = np.zeros(points_in_grid.sum(), dtype='float64')
@@ -832,7 +832,7 @@
AMRData.__init__(self, pf, fields, **kwargs)
self.field = ensure_list(fields)[0]
self.set_field_parameter("axis",axis)
-
+
def _convert_field_name(self, field):
return field
@@ -851,7 +851,6 @@
fields_to_get = self.fields[:]
else:
fields_to_get = ensure_list(fields)
- temp_data = {}
for field in fields_to_get:
if self.field_data.has_key(field): continue
if field not in self.hierarchy.field_list:
@@ -861,18 +860,13 @@
# we're going to have to set the same thing several times
data = [self._get_data_from_grid(grid, field)
for grid in self._get_grids()]
- if len(data) == 0: data = np.array([])
- else: data = np.concatenate(data)
- temp_data[field] = data
+ if len(data) == 0:
+ data = np.array([])
+ else:
+ data = np.concatenate(data)
# Now the next field can use this field
- self[field] = temp_data[field]
- # We finalize
- if temp_data != {}:
- temp_data = self.comm.par_combine_object(temp_data,
- datatype='dict', op='cat')
- # And set, for the next group
- for field in temp_data.keys():
- self[field] = temp_data[field]
+ self[field] = self.comm.par_combine_object(data, op='cat',
+ datatype='array')
def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
axis = self.axis
@@ -887,7 +881,7 @@
(bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
if axes_unit is None and units != ('1', '1'):
axes_unit = units
- pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
+ pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
plot_type=plot_type)
pw.set_axes_unit(axes_unit)
return pw
@@ -993,7 +987,7 @@
for field in fields:
#mylog.debug("Trying to obtain %s from node %s",
#self._convert_field_name(field), node_name)
- fdata=self.hierarchy.get_data(node_name,
+ fdata=self.hierarchy.get_data(node_name,
self._convert_field_name(field))
if fdata is not None:
#mylog.debug("Got %s from node %s", field, node_name)
@@ -1151,7 +1145,7 @@
t = points * ind[cm] * dx + (grid.LeftEdge[xaxis] + 0.5 * dx)
# calculate ypoints array
ind = cmI[1, :].ravel() # yind
- del cmI # no longer needed
+ del cmI # no longer needed
t = np.vstack( (t, points * ind[cm] * dy + \
(grid.LeftEdge[yaxis] + 0.5 * dy))
)
@@ -1210,7 +1204,7 @@
def hub_upload(self):
self._mrep.upload()
- def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
+ def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
origin='center-window'):
r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
object.
@@ -1490,7 +1484,7 @@
self.dims = dims
self.dds = self.width / self.dims
self.bounds = np.array([0.0,1.0,0.0,1.0])
-
+
self.set_field_parameter('center', center)
# Let's set up our plane equation
# ax + by + cz + d = 0
@@ -1576,7 +1570,7 @@
# Mark these pixels to speed things up
self._pixelmask[pointI] = 0
-
+
return
else:
raise SyntaxError("Making a fixed resolution slice with "
@@ -1664,7 +1658,7 @@
L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
return "%s/c%s_L%s" % \
(self._top_node, cen_name, L_name)
-
+
class AMRQuadTreeProjBase(AMR2DData):
"""
This is a data object corresponding to a line integral through the
@@ -1822,7 +1816,7 @@
convs[:] = 1.0
return dls, convs
- def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
+ def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
origin='center-window'):
r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
object.
@@ -1863,7 +1857,7 @@
if g.Level == level],
self.get_dependencies(fields), self.hierarchy.io)
self._add_level_to_tree(tree, level, fields)
- mylog.debug("End of projecting level level %s, memory usage %0.3e",
+ mylog.debug("End of projecting level level %s, memory usage %0.3e",
level, get_memory_usage()/1024.)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
@@ -1955,7 +1949,7 @@
xpoints = (xind + (start_index[x_dict[self.axis]])).astype('int64')
ypoints = (yind + (start_index[y_dict[self.axis]])).astype('int64')
to_add = np.array([d[used_points].ravel() for d in full_proj], order='F')
- tree.add_array_to_tree(grid.Level, xpoints, ypoints,
+ tree.add_array_to_tree(grid.Level, xpoints, ypoints,
to_add, weight_proj[used_points].ravel())
def _add_level_to_tree(self, tree, level, fields):
@@ -2296,7 +2290,7 @@
del self.__retval_coords[grid.id]
del self.__retval_fields[grid.id]
del self.__overlap_masks[grid.id]
- mylog.debug("End of projecting level level %s, memory usage %0.3e",
+ mylog.debug("End of projecting level level %s, memory usage %0.3e",
level, get_memory_usage()/1024.)
coord_data = np.concatenate(coord_data, axis=1)
field_data = np.concatenate(field_data, axis=1)
@@ -2327,7 +2321,7 @@
def add_fields(self, fields, weight = "CellMassMsun"):
pass
- def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
+ def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
origin='center-window'):
r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this
object.
@@ -2535,7 +2529,7 @@
ref_ratio = self.pf.refine_by**(self.level - grid.Level)
FillBuffer(ref_ratio,
grid.get_global_startindex(), self.global_startindex,
- c_fields, g_fields,
+ c_fields, g_fields,
self.ActiveDimensions, grid.ActiveDimensions,
grid.child_mask, self.domain_width, dls[grid.Level],
self.axis)
@@ -2696,9 +2690,9 @@
def cut_region(self, field_cuts):
"""
Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
+ fly with a set of field_cuts. It is very useful for applying
conditions to the fields in your data object.
-
+
Examples
--------
To find the total mass of gas above 10^6 K in your volume:
@@ -2739,7 +2733,7 @@
useful for calculating, for instance, total isocontour area, or
visualizing in an external program (such as `MeshLab
<http://meshlab.sf.net>`_.)
-
+
Parameters
----------
field : string
@@ -2853,7 +2847,7 @@
Additionally, the returned flux is defined as flux *into* the surface,
not flux *out of* the surface.
-
+
Parameters
----------
field : string
@@ -2910,7 +2904,7 @@
ff = np.ones(vals.shape, dtype="float64")
else:
ff = grid.get_vertex_centered_data(fluxing_field)
- xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
+ xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
[field_x, field_y, field_z]]
return march_cubes_grid_flux(value, vals, xv, yv, zv,
ff, mask, grid.LeftEdge, grid.dds)
@@ -3003,7 +2997,7 @@
----------------
force_refresh : bool
Force a refresh of the data. Defaults to True.
-
+
Examples
--------
"""
@@ -3243,7 +3237,7 @@
if self._grids is not None: return
GLE = self.pf.h.grid_left_edge
GRE = self.pf.h.grid_right_edge
- goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
+ goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
GLE, GRE)
cgrids = self.pf.h.grids[goodI.astype('bool')]
# find_grids_in_inclined_box seems to be broken.
@@ -3251,13 +3245,13 @@
grids = []
for i,grid in enumerate(cgrids):
v = grid_points_in_volume(self.box_lengths, self.origin,
- self._rot_mat, grid.LeftEdge,
+ self._rot_mat, grid.LeftEdge,
grid.RightEdge, grid.dds,
grid.child_mask, 1)
if v: grids.append(grid)
self._grids = np.empty(len(grids), dtype='object')
for gi, g in enumerate(grids): self._grids[gi] = g
-
+
def _is_fully_enclosed(self, grid):
# This should be written at some point.
@@ -3270,10 +3264,10 @@
return True
pm = np.zeros(grid.ActiveDimensions, dtype='int32')
grid_points_in_volume(self.box_lengths, self.origin,
- self._rot_mat, grid.LeftEdge,
+ self._rot_mat, grid.LeftEdge,
grid.RightEdge, grid.dds, pm, 0)
return pm
-
+
class AMRRegionBase(AMR3DData):
"""A 3D region of data with an arbitrary center.
@@ -3409,9 +3403,9 @@
_dx_pad = 0.0
def __init__(self, center, left_edge, right_edge, fields = None,
pf = None, **kwargs):
- AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
+ AMRPeriodicRegionBase.__init__(self, center, left_edge, right_edge,
fields = None, pf = None, **kwargs)
-
+
class AMRGridCollectionBase(AMR3DData):
"""
@@ -3578,7 +3572,7 @@
self._C = C
self._e0 = e0 = e0 / (e0**2.0).sum()**0.5
self._tilt = tilt
-
+
# find the t1 angle needed to rotate about z axis to align e0 to x
t1 = np.arctan(e0[1] / e0[0])
# rotate e0 by -t1
@@ -3588,7 +3582,7 @@
t2 = np.arctan(-r1[2] / r1[0])
"""
calculate the original e1
- given the tilt about the x axis when e0 was aligned
+ given the tilt about the x axis when e0 was aligned
to x after t1, t2 rotations about z, y
"""
RX = get_rotation_matrix(-tilt, (1,0,0)).transpose()
@@ -3612,7 +3606,7 @@
self._refresh_data()
"""
- Having another function find_ellipsoid_grids is too much work,
+ Having another function find_ellipsoid_grids is too much work,
can just use the sphere one and forget about checking orientation
but feed in the A parameter for radius
"""
@@ -3700,7 +3694,7 @@
class AMRCoveringGridBase(AMR3DData):
"""A 3D region with all data extracted to a single, specified
resolution.
-
+
Parameters
----------
level : int
@@ -3798,7 +3792,7 @@
n_bad = np.where(self[obtain_fields[0]]==-999)[0].size
mylog.error("Covering problem: %s cells are uncovered", n_bad)
raise KeyError(n_bad)
-
+
def _generate_field(self, field):
if self.pf.field_info.has_key(field):
# First we check the validator; this might even raise!
@@ -3826,13 +3820,13 @@
def _get_data_from_grid(self, grid, fields):
ll = int(grid.Level == self.level)
ref_ratio = self.pf.refine_by**(self.level - grid.Level)
- g_fields = [gf.astype("float64")
+ g_fields = [gf.astype("float64")
if gf.dtype != "float64"
else gf for gf in (grid[field] for field in fields)]
c_fields = [self[field] for field in fields]
count = FillRegion(ref_ratio,
grid.get_global_startindex(), self.global_startindex,
- c_fields, g_fields,
+ c_fields, g_fields,
self.ActiveDimensions, grid.ActiveDimensions,
grid.child_mask, self.domain_width, ll, 0)
return count
@@ -3848,7 +3842,7 @@
c_fields = [self[field] for field in fields]
FillRegion(ref_ratio,
grid.get_global_startindex(), self.global_startindex,
- c_fields, g_fields,
+ c_fields, g_fields,
self.ActiveDimensions, grid.ActiveDimensions,
grid.child_mask, self.domain_width, ll, 1)
@@ -3869,7 +3863,7 @@
fill the region to level 1, replacing any cells actually
covered by level 1 data, and then recursively repeating this
process until it reaches the specified `level`.
-
+
Parameters
----------
level : int
@@ -3881,10 +3875,11 @@
fields : array_like, optional
A list of fields that you'd like pre-generated for your object
- Example
- -------
- cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
- dims=[128, 128, 128])
+ Examples
+ --------
+
+ >>> cube = pf.h.smoothed_covering_grid(2, left_edge=[0.0, 0.0, 0.0], \
+ ... dims=[128, 128, 128])
"""
_type_name = "smoothed_covering_grid"
def __init__(self, *args, **kwargs):
@@ -3989,7 +3984,7 @@
def _refine(self, dlevel, fields):
rf = float(self.pf.refine_by**dlevel)
- input_left = (self._old_global_startindex + 0.5) * rf
+ input_left = (self._old_global_startindex + 0.5) * rf
dx = np.fromiter((self['cd%s' % ax] for ax in 'xyz'), count=3, dtype='float64')
output_dims = np.rint((self.ActiveDimensions*self.dds)/dx+0.5).astype('int32') + 2
self._cur_dims = output_dims
@@ -4003,13 +3998,13 @@
@restore_field_information_state
def _get_data_from_grid(self, grid, fields):
- g_fields = [gf.astype("float64")
+ g_fields = [gf.astype("float64")
if gf.dtype != "float64"
else gf for gf in (grid[field] for field in fields)]
c_fields = [self.field_data[field] for field in fields]
count = FillRegion(1,
grid.get_global_startindex(), self.global_startindex,
- c_fields, g_fields,
+ c_fields, g_fields,
self._cur_dims, grid.ActiveDimensions,
grid.child_mask, self.domain_width, 1, 0)
return count
@@ -4021,14 +4016,14 @@
"""
This will build a hybrid region based on the boolean logic
of the regions.
-
+
Parameters
----------
regions : list
A list of region objects and strings describing the boolean logic
to use when building the hybrid region. The boolean logic can be
nested using parentheses.
-
+
Examples
--------
>>> re1 = pf.h.region([0.5, 0.5, 0.5], [0.4, 0.4, 0.4],
@@ -4041,7 +4036,7 @@
sp1, ")"])
"""
_type_name = "boolean"
- _con_args = ("regions")
+ _con_args = ("regions",)
def __init__(self, regions, fields = None, pf = None, **kwargs):
# Center is meaningless, but we'll define it all the same.
AMR3DData.__init__(self, [0.5]*3, fields, pf, **kwargs)
@@ -4053,7 +4048,7 @@
self._get_all_regions()
self._make_overlaps()
self._get_list_of_grids()
-
+
def _get_all_regions(self):
# Before anything, we simply find out which regions are involved in all
# of this process, uniquely.
@@ -4063,7 +4058,7 @@
# So cut_masks don't get messed up.
item._boolean_touched = True
self._all_regions = np.unique(self._all_regions)
-
+
def _make_overlaps(self):
# Using the processed cut_masks, we'll figure out what grids
# are left in the hybrid region.
@@ -4097,7 +4092,7 @@
continue
pbar.update(i)
pbar.finish()
-
+
def __repr__(self):
# We'll do this the slow way to be clear what's going on
s = "%s (%s): " % (self.__class__.__name__, self.pf)
@@ -4110,7 +4105,7 @@
if i < (len(self.regions) - 1): s += ", "
s += "]"
return s
-
+
def _is_fully_enclosed(self, grid):
return (grid in self._all_overlap)
@@ -4197,7 +4192,7 @@
<http://meshlab.sf.net>`_.) The object has the properties .vertices
and will sample values if a field is requested. The values are
interpolated to the center of a given face.
-
+
Parameters
----------
data_source : AMR3DDataObject
@@ -4272,7 +4267,7 @@
self[fields] = samples
elif sample_type == "vertex":
self.vertex_samples[fields] = samples
-
+
@restore_grid_state
def _extract_isocontours_from_grid(self, grid, field, value,
@@ -4309,7 +4304,7 @@
Additionally, the returned flux is defined as flux *into* the surface,
not flux *out of* the surface.
-
+
Parameters
----------
field_x : string
@@ -4356,7 +4351,7 @@
return flux
@restore_grid_state
- def _calculate_flux_in_grid(self, grid,
+ def _calculate_flux_in_grid(self, grid,
field_x, field_y, field_z, fluxing_field = None):
mask = self.data_source._get_cut_mask(grid) * grid.child_mask
vals = grid.get_vertex_centered_data(self.surface_field)
@@ -4364,7 +4359,7 @@
ff = np.ones(vals.shape, dtype="float64")
else:
ff = grid.get_vertex_centered_data(fluxing_field)
- xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
+ xv, yv, zv = [grid.get_vertex_centered_data(f) for f in
[field_x, field_y, field_z]]
return march_cubes_grid_flux(self.field_value, vals, xv, yv, zv,
ff, mask, grid.LeftEdge, grid.dds)
@@ -4482,7 +4477,7 @@
w = bounds[i][1] - bounds[i][0]
np.divide(tmp, w, tmp)
np.subtract(tmp, 0.5, tmp) # Center at origin.
- v[ax][:] = tmp
+ v[ax][:] = tmp
f.write("end_header\n")
v.tofile(f)
arr["ni"][:] = 3
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -791,22 +791,28 @@
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
-def get_radius(positions, data):
- c = data.get_field_parameter("center")
- n_tup = tuple([1 for i in range(positions.ndim-1)])
- center = np.tile(np.reshape(c, (positions.shape[0],)+n_tup),(1,)+positions.shape[1:])
- periodicity = data.pf.periodicity
- if any(periodicity):
- period = data.pf.domain_right_edge - data.pf.domain_left_edge
- return periodic_dist(positions, center, period, periodicity)
- else:
- return euclidean_dist(positions, center)
+def get_radius(data, field_prefix):
+ center = data.get_field_parameter("center")
+ DW = data.pf.domain_right_edge - data.pf.domain_left_edge
+ radius = np.zeros(data[field_prefix+"x"].shape, dtype='float64')
+ r = radius.copy()
+ if any(data.pf.periodicity):
+ rdw = radius.copy()
+ for i, ax in enumerate('xyz'):
+ np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+ if data.pf.periodicity[i] == True:
+ np.subtract(DW[i], r, rdw)
+ np.abs(r, r)
+ np.minimum(r, rdw, r)
+ np.power(r, 2.0, r)
+ np.add(radius, r, radius)
+ np.sqrt(radius, radius)
+ return radius
+
def _ParticleRadius(field, data):
- positions = np.array([data["particle_position_%s" % ax] for ax in 'xyz'])
- return get_radius(positions, data)
+ return get_radius(data, "particle_position_")
def _Radius(field, data):
- positions = np.array([data['x'], data['y'], data['z']])
- return get_radius(positions, data)
+ return get_radius(data, "")
def _ConvertRadiusCGS(data):
return data.convert("cm")
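
The rewritten get_radius avoids tiling a center array and instead works axis-by-axis with in-place NumPy ops. A quick runnable check of the per-axis periodic-minimum idea it uses (illustrative values, one axis):

>>> import numpy as np
>>> DW = 1.0                                  # domain width along this axis
>>> dx = np.array([0.1, 0.9])                 # raw offsets from center
>>> np.minimum(np.abs(dx), np.abs(DW - dx))   # wrap-around distance wins for 0.9
array([ 0.1,  0.1])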
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -289,6 +289,11 @@
self.parameter_file.domain_right_edge)
self.parameter_file.domain_dimensions = \
np.round(self.parameter_file.domain_width/gdds[0]).astype('int')
+
+ # Need to reset the units in the parameter file based on the correct
+ # domain left/right/dimensions.
+ self.parameter_file._set_units()
+
if self.parameter_file.dimensionality <= 2 :
self.parameter_file.domain_dimensions[2] = np.int(1)
if self.parameter_file.dimensionality == 1 :
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -193,8 +193,8 @@
Returns
-------
array of dicts
- An array of **kwargs dictionaries to be individually passed to
- the appropriate function matching these kwargs.
+ An array of dictionaries to be individually passed to the appropriate
+ function matching these kwargs.
Examples
--------
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -57,6 +57,49 @@
else:
return False
+
+def add_grid(node, gle, gre, gid, rank, size):
+ if not should_i_build(node, rank, size):
+ return
+
+ if kd_is_leaf(node):
+ insert_grid(node, gle, gre, gid, rank, size)
+ else:
+ less_id = gle[node.split.dim] < node.split.pos
+ if less_id:
+ add_grid(node.left, gle, gre,
+ gid, rank, size)
+
+ greater_id = gre[node.split.dim] > node.split.pos
+ if greater_id:
+ add_grid(node.right, gle, gre,
+ gid, rank, size)
+
+
+def insert_grid(node, gle, gre, grid_id, rank, size):
+ if not should_i_build(node, rank, size):
+ return
+
+ # If we should continue to split based on parallelism, do so!
+ if should_i_split(node, rank, size):
+ geo_split(node, gle, gre, grid_id, rank, size)
+ return
+
+ if np.all(gle <= node.left_edge) and \
+ np.all(gre >= node.right_edge):
+ node.grid = grid_id
+ assert(node.grid is not None)
+ return
+
+ # Split the grid
+ check = split_grid(node, gle, gre, grid_id, rank, size)
+ # If check is -1, then we have found a place where there are no choices.
+ # Exit out and set the node to None.
+ if check == -1:
+ node.grid = None
+ return
+
+
def add_grids(node, gles, gres, gids, rank, size):
if not should_i_build(node, rank, size):
return
@@ -74,9 +117,36 @@
add_grids(node.right, gles[greater_ids], gres[greater_ids],
gids[greater_ids], rank, size)
+
def should_i_split(node, rank, size):
return node.id < size
+
+def geo_split_grid(node, gle, gre, grid_id, rank, size):
+ big_dim = np.argmax(gre-gle)
+ new_pos = (gre[big_dim] + gle[big_dim])/2.
+ old_gre = gre.copy()
+ new_gle = gle.copy()
+ new_gle[big_dim] = new_pos
+ gre[big_dim] = new_pos
+
+ split = Split(big_dim, new_pos)
+
+ # Create a Split
+ divide(node, split)
+
+ # Populate Left Node
+ #print 'Inserting left node', node.left_edge, node.right_edge
+ insert_grid(node.left, gle, gre,
+ grid_id, rank, size)
+
+ # Populate Right Node
+ #print 'Inserting right node', node.left_edge, node.right_edge
+ insert_grid(node.right, new_gle, old_gre,
+ grid_id, rank, size)
+ return
+
+
def geo_split(node, gles, gres, grid_ids, rank, size):
big_dim = np.argmax(gres[0]-gles[0])
new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
@@ -128,6 +198,39 @@
node.grid = None
return
+def split_grid(node, gle, gre, grid_id, rank, size):
+ # Find a Split
+ data = np.array([(gle[:], gre[:])], copy=False)
+ best_dim, split_pos, less_id, greater_id = \
+ kdtree_get_choices(data, node.left_edge, node.right_edge)
+
+ # If best_dim is -1, then we have found a place where there are no choices.
+ # Exit out and set the node to None.
+ if best_dim == -1:
+ return -1
+
+ split = Split(best_dim, split_pos)
+
+ del data, best_dim, split_pos
+
+ # Create a Split
+ divide(node, split)
+
+ # Populate Left Node
+ #print 'Inserting left node', node.left_edge, node.right_edge
+ if less_id:
+ insert_grid(node.left, gle, gre,
+ grid_id, rank, size)
+
+ # Populate Right Node
+ #print 'Inserting right node', node.left_edge, node.right_edge
+ if greater_id:
+ insert_grid(node.right, gle, gre,
+ grid_id, rank, size)
+
+ return
+
+
def split_grids(node, gles, gres, grid_ids, rank, size):
# Find a Split
data = np.array([(gles[i,:], gres[i,:]) for i in
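
The new geo_split_grid picks the longest axis of the grid and bisects it, mirroring the vectorized geo_split; its first few lines reduce to this runnable sketch:

>>> import numpy as np
>>> gle, gre = np.array([0., 0., 0.]), np.array([1., 0.5, 0.25])
>>> big_dim = np.argmax(gre - gle)            # 0: split along the longest axis
>>> new_pos = (gre[big_dim] + gle[big_dim]) / 2.
>>> int(big_dim), new_pos
(0, 0.5)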
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -29,7 +29,7 @@
from amr_kdtools import Node, Split, kd_is_leaf, kd_sum_volume, kd_node_check, \
depth_traverse, viewpoint_traverse, add_grids, \
receive_and_reduce, send_to_parent, scatter_image, find_node, \
- depth_first_touch
+ depth_first_touch, add_grid
from yt.utilities.parallel_tools.parallel_analysis_interface \
import ParallelAnalysisInterface
from yt.utilities.lib.grid_traversal import PartitionedGrid
@@ -78,18 +78,27 @@
lvl_range = range(self.min_level, self.max_level+1)
if grids is None:
level_iter = self.pf.hierarchy.get_levels()
+ grids_added = 0
while True:
try:
grids = level_iter.next()
except:
break
- if grids[0].Level not in lvl_range: continue
- gmask = np.array([g in self.grids for g in grids])
- gles = np.array([g.LeftEdge for g in grids])[gmask]
- gres = np.array([g.RightEdge for g in grids])[gmask]
- gids = np.array([g.id for g in grids])[gmask]
- add_grids(self.trunk, gles, gres, gids, self.comm_rank, self.comm_size)
- del gles, gres, gids, grids
+ if grids[0].Level not in lvl_range:
+ continue
+ if grids_added < self.comm_size:
+ gmask = np.array([g in self.grids for g in grids])
+ gles = np.array([g.LeftEdge for g in grids])[gmask]
+ gres = np.array([g.RightEdge for g in grids])[gmask]
+ gids = np.array([g.id for g in grids])[gmask]
+ add_grids(self.trunk, gles, gres, gids, self.comm_rank,
+ self.comm_size)
+ grids_added += grids.size
+ del gles, gres, gids, grids
+ else:
+ grids_added += grids.size
+ [add_grid(self.trunk, g.LeftEdge, g.RightEdge, g.id,
+ self.comm_rank, self.comm_size) for g in grids]
else:
gles = np.array([g.LeftEdge for g in grids])
gres = np.array([g.RightEdge for g in grids])
@@ -123,15 +132,16 @@
mylog.debug('AMRKDTree volume = %e' % vol)
kd_node_check(self.trunk)
- def sum_cells(self):
+ def sum_cells(self, all_cells=False):
cells = 0
for node in depth_traverse(self):
if node.grid is None:
continue
+ if not all_cells and not kd_is_leaf(node):
+ continue
grid = self.pf.h.grids[node.grid - self._id_offset]
dds = grid.dds
gle = grid.LeftEdge
- gre = grid.RightEdge
li = np.rint((node.left_edge-gle)/dds).astype('int32')
ri = np.rint((node.right_edge-gle)/dds).astype('int32')
dims = (ri - li).astype('int32')
@@ -343,7 +353,8 @@
if (in_grid != True).sum()>0:
grids[in_grid != True] = \
- [self.pf.h.grids[self.locate_brick(new_positions[i]).grid]
+ [self.pf.h.grids[self.locate_brick(new_positions[i]).grid -
+ self._id_offset]
for i in get_them]
cis[in_grid != True] = \
[(new_positions[i]-grids[i].LeftEdge)/
@@ -380,7 +391,8 @@
"""
position = np.array(position)
- grid = self.pf.h.grids[self.locate_brick(position).grid]
+ grid = self.pf.h.grids[self.locate_brick(position).grid -
+ self._id_offset]
ci = ((position-grid.LeftEdge)/grid.dds).astype('int64')
return self.locate_neighbors(grid,ci)
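
With the sum_cells change, cell counts skip non-leaf nodes by default instead of double-counting interior nodes. Schematically (assuming an existing AMRKDTree instance kd, as built in the tests below):

>>> kd.sum_cells()                  # leaf nodes only (new default)
>>> kd.sum_cells(all_cells=True)    # every node carrying a grid, as before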
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -50,7 +50,6 @@
config.add_subpackage("answer_testing")
config.add_subpackage("delaunay") # From SciPy, written by Robert Kern
config.add_subpackage("kdtree")
- config.add_data_files(('kdtree', ['kdtree/fKDpy.so']))
config.add_subpackage("spatial")
config.add_subpackage("grid_data_format")
config.add_subpackage("parallel_tools")
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/utilities/tests/test_amr_kdtree.py
--- a/yt/utilities/tests/test_amr_kdtree.py
+++ b/yt/utilities/tests/test_amr_kdtree.py
@@ -24,48 +24,46 @@
"""
from yt.utilities.amr_kdtree.api import AMRKDTree
-from yt.utilities.amr_kdtree.amr_kdtools import kd_node_check, depth_traverse
+from yt.utilities.amr_kdtree.amr_kdtools import depth_traverse
import yt.utilities.initial_conditions as ic
import yt.utilities.flagging_methods as fm
from yt.frontends.stream.api import load_uniform_grid, refine_amr
+from yt.testing import assert_equal
import numpy as np
-def test_amr_kdtree():
+
+def test_amr_kdtree_coverage():
domain_dims = (32, 32, 32)
data = np.zeros(domain_dims) + 0.25
- fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
+ fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75],
+ {"Density": (0.25, 100.0)})]
rc = [fm.flagging_method_registry["overdensity"](8.0)]
ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
pf = refine_amr(ug, rc, fo, 5)
-
+
kd = AMRKDTree(pf)
- assert(1.0 == kd.count_volume())
-
-def test_amr_kdtree_coverage():
- domain_dims = (32, 32, 32)
- data = np.zeros(domain_dims) + 0.25
- fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})]
- rc = [fm.flagging_method_registry["overdensity"](8.0)]
- ug = load_uniform_grid({'Density': data}, domain_dims, 1.0)
- pf = refine_amr(ug, rc, fo, 5)
-
- kd = AMRKDTree(pf)
+ volume = kd.count_volume()
+ yield assert_equal, volume, \
+ np.prod(pf.domain_right_edge - pf.domain_left_edge)
+ cells = kd.count_cells()
+ true_cells = pf.h.all_data().quantities['TotalQuantity']('Ones')[0]
+ yield assert_equal, cells, true_cells
# This largely reproduces the AMRKDTree.tree.check_tree() functionality
+ tree_ok = True
for node in depth_traverse(kd.tree):
if node.grid is None:
continue
grid = pf.h.grids[node.grid - kd._id_offset]
dds = grid.dds
gle = grid.LeftEdge
- gre = grid.RightEdge
li = np.rint((node.left_edge-gle)/dds).astype('int32')
ri = np.rint((node.right_edge-gle)/dds).astype('int32')
dims = (ri - li).astype('int32')
- assert(np.all(grid.LeftEdge <= node.left_edge))
- assert(np.all(grid.RightEdge >= node.right_edge))
- assert(np.all(dims > 0))
+ tree_ok *= np.all(grid.LeftEdge <= node.left_edge)
+ tree_ok *= np.all(grid.RightEdge >= node.right_edge)
+ tree_ok *= np.all(dims > 0)
-
+ yield assert_equal, True, tree_ok
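
The test above is also converted to nose's generator-test idiom, where each yielded (callable, args...) tuple becomes its own assertion. A minimal standalone example of that pattern (not from this commit):

from yt.testing import assert_equal

def test_pairs():
    for a, b in [(1, 1), (2, 2)]:
        yield assert_equal, a, b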
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/visualization/plot_modifications.py
--- a/yt/visualization/plot_modifications.py
+++ b/yt/visualization/plot_modifications.py
@@ -361,7 +361,7 @@
pxs, pys = np.mgrid[0:0:1j,0:0:1j]
GLE = plot.data.grid_left_edge
GRE = plot.data.grid_right_edge
- grid_levels = plot.data.grid_levels
+ grid_levels = plot.data.grid_levels[:,0]
min_level = self.min_level
max_level = self.max_level
if min_level is None:
diff -r 1626fc1a36034c8a117ed175d3b255f60ef0e165 -r 60ea751d9671fd5acc73a2ed71124a23d9afa79b yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -644,8 +644,8 @@
the new maximum of the colormap scale. If 'max', will
set to the maximum value in the current view.
- Keyword Parameters
- ------------------
+ Other Parameters
+ ----------------
dyanmic_range : float (default: None)
The dynamic range of the image.
If zmin == None, will set zmin = zmax / dynamic_range
@@ -972,8 +972,8 @@
manager documentation for more details.
http://matplotlib.org/api/font_manager_api.html
- Caveats
- -------
+ Notes
+ -----
Mathtext axis labels will only obey the `size` keyword.
Examples
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn mailing list