[yt-svn] commit/yt: 19 new changesets
commits-noreply at bitbucket.org
Tue Jan 7 10:48:16 PST 2014
19 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/e0880d58fb3a/
Changeset: e0880d58fb3a
Branch: yt-3.0
User: jzuhone
Date: 2013-12-19 06:48:21
Summary: First set of changes to make yt run on Windows
Affected #: 12 files
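The recurring idiom in this changeset is a runtime os.name guard around POSIX-only imports and build steps (setup.py, yt/mods.py, yt/utilities/lib/__init__.py, and yt/visualization/plot_window.py all gain one). A minimal sketch of the pattern; the fallback branch is illustrative rather than from the diff:

    import os

    if os.name == "posix":
        # Compiled against libpng; only built on POSIX in this changeset.
        from yt.utilities.lib import write_png_to_string
    else:
        # On Windows (os.name == "nt") the symbol is left undefined and
        # the PNG code paths are expected to go unused.
        write_png_to_string = None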
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 setup.py
--- a/setup.py
+++ b/setup.py
@@ -82,12 +82,13 @@
"app/templates",
]
-for subdir in REASON_DIRS:
- dir_name = "yt/gui/reason/html/%s/" % (subdir)
- files = []
- for ext in ["js", "html", "css", "png", "ico", "gif"]:
- files += glob.glob("%s/*.%s" % (dir_name, ext))
- REASON_FILES.append((dir_name, files))
+if os.name == "posix":
+ for subdir in REASON_DIRS:
+ dir_name = "yt/gui/reason/html/%s/" % (subdir)
+ files = []
+ for ext in ["js", "html", "css", "png", "ico", "gif"]:
+ files += glob.glob("%s/*.%s" % (dir_name, ext))
+ REASON_FILES.append((dir_name, files))
# Verify that we have Cython installed
try:
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python
import setuptools
-
+import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
@@ -10,7 +10,8 @@
config.add_subpackage("absorption_spectrum")
config.add_subpackage("coordinate_transformation")
config.add_subpackage("cosmological_observation")
- config.add_subpackage("halo_finding")
+ if os.name == "posix":
+ config.add_subpackage("halo_finding")
config.add_subpackage("halo_mass_function")
config.add_subpackage("halo_merger_tree")
config.add_subpackage("halo_profiler")
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -18,7 +18,7 @@
from libc.string cimport memcpy
import data_structures
-cdef extern from "alloca.h":
+cdef extern from "malloc.h":
void *alloca(int)
cdef extern from "artio.h":
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -23,7 +23,7 @@
from fp_utils cimport *
from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
-cdef extern from "alloca.h":
+cdef extern from "malloc.h":
void *alloca(int)
cdef inline int gind(int i, int j, int k, int dims[3]):
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,7 +24,7 @@
from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
from .particle_deposit cimport sph_kernel, gind
-cdef extern from "alloca.h":
+cdef extern from "malloc.h":
void *alloca(int)
cdef struct NeighborList
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -121,8 +121,9 @@
available_analysis_modules = get_available_modules()
# Import our analysis modules
-from yt.analysis_modules.halo_finding.api import \
- HaloFinder
+if os.name == "posix":
+ from yt.analysis_modules.halo_finding.api import \
+ HaloFinder
from yt.utilities.definitions import \
axis_names, x_dict, y_dict, inv_axis_names
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -60,7 +60,7 @@
mylog.debug("SIGUSR1 registered for traceback printing")
signal.signal(signal.SIGUSR2, signal_ipython)
mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError: # Not in main thread
+except (ValueError, AttributeError): # Not in main thread, or signal unavailable on this platform (e.g. Windows)
pass
class SetExceptionHandling(argparse.Action):
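The widened except clause is needed because Windows has no SIGUSR1/SIGUSR2: merely referencing signal.SIGUSR1 there raises AttributeError, while registering a handler off the main thread raises ValueError. A self-contained sketch of the guarded registration (handler body illustrative):

    import signal
    import traceback

    def print_traceback(signum, frame):
        traceback.print_stack(frame)

    try:
        signal.signal(signal.SIGUSR1, print_traceback)
    except (ValueError, AttributeError):
        # ValueError: signal() was called outside the main thread.
        # AttributeError: the platform (e.g. Windows) has no SIGUSR1,
        # so even the attribute lookup on the signal module fails.
        pass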
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -1,3 +1,4 @@
+import os
"""
Compatibility module
@@ -20,7 +21,8 @@
from .Interpolators import *
from .misc_utilities import *
from .Octree import *
-from .png_writer import *
+if os.name == "posix":
+ from .png_writer import *
from .PointsInVolume import *
from .QuadTree import *
from .RayIntegrators import *
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -29,7 +29,7 @@
void FIX_LONG( unsigned )
void FIX_FLOAT( float )
-cdef extern from "alloca.h":
+cdef extern from "malloc.h":
void *alloca(int)
cdef extern from "stdio.h":
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -16,6 +16,8 @@
curdir = os.getcwd()
exit_code = 1
+ if os.name == 'nt': return False
+
try:
os.chdir(tmpdir)
@@ -99,13 +101,14 @@
config.add_extension("Octree",
["yt/utilities/lib/Octree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
- config.add_extension("png_writer",
- ["yt/utilities/lib/png_writer.pyx"],
- define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
- include_dirs=[png_inc],
- library_dirs=[png_lib],
- libraries=["m", "png"],
- depends=["yt/utilities/lib/fp_utils.pxd"]),
+ if os.name == "posix":
+ config.add_extension("png_writer",
+ ["yt/utilities/lib/png_writer.pyx"],
+ define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
+ include_dirs=[png_inc],
+ library_dirs=[png_lib],
+ libraries=["m", "png"],
+ depends=["yt/utilities/lib/fp_utils.pxd"]),
config.add_extension("PointsInVolume",
["yt/utilities/lib/PointsInVolume.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -131,7 +131,7 @@
elif os.path.exists(cfg):
return get_location_from_cfg(cfg)
# Now we see if ctypes can help us
- if os.name == 'posix':
+ if os.name == 'posix' or os.name == 'nt':
target_inc, target_lib = get_location_from_ctypes(header, library)
if None not in (target_inc, target_lib):
print(
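Allowing the ctypes fallback on os.name == 'nt' means header and library discovery can proceed on Windows as it does on POSIX. The standard-library probe this kind of lookup rests on is ctypes.util.find_library; a hedged sketch (the actual get_location_from_ctypes helper does more than this):

    import os
    from ctypes.util import find_library

    if os.name in ("posix", "nt"):
        # find_library returns a resolvable name or path for the PNG
        # library, or None if the loader cannot locate it.
        print find_library("png")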
diff -r ce44c24bd28d3d79f7c2d87091902edabbf00bac -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -42,7 +42,8 @@
from yt.funcs import \
mylog, defaultdict, iterable, ensure_list, \
fix_axis, get_image_suffix, get_ipython_api_version
-from yt.utilities.lib import write_png_to_string
+if os.name == "posix":
+ from yt.utilities.lib import write_png_to_string
from yt.utilities.definitions import \
x_dict, y_dict, \
axis_names, axis_labels, \
https://bitbucket.org/yt_analysis/yt/commits/876b17286d9b/
Changeset: 876b17286d9b
Branch: yt-3.0
User: jzuhone
Date: 2013-12-19 07:47:01
Summary: Merging
Affected #: 73 files
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -264,28 +264,45 @@
echo "Alternatively, download the Xcode command line tools from"
echo "the Apple developer tools website."
echo
- echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+     echo "OS X 10.8.4 and 10.9: download Xcode 5.0.2 from the Mac App Store."
echo "(search for Xcode)."
+ echo
echo "Additionally, you will have to manually install the Xcode"
- echo "command line tools, see:"
- echo "http://stackoverflow.com/questions/9353444"
- echo "Alternatively, download the Xcode command line tools from"
- echo "the Apple developer tools website."
+ echo "command line tools."
+ echo
+ echo "For OS X 10.8, see:"
+ echo "http://stackoverflow.com/questions/9353444"
echo
- echo "NOTE: It's possible that the installation will fail, if so,"
- echo "please set the following environment variables, remove any"
- echo "broken installation tree, and re-run this script verbatim."
- echo
- echo "$ export CC=gcc"
- echo "$ export CXX=g++"
- echo
- OSX_VERSION=`sw_vers -productVersion`
- if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ echo "For OS X 10.9, the command line tools can be installed"
+ echo "with the following command:"
+ echo " xcode-select --install"
+ echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
then
MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
fi
fi
+ if [ -f /etc/redhat-release ]
+ then
+        echo "Looks like you're on a Red Hat-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * openssl-devel"
+ echo " * uuid-devel"
+ echo " * readline-devel"
+ echo " * ncurses-devel"
+ echo " * zip"
+ echo " * gcc-{,c++,gfortran}"
+ echo " * make"
+ echo " * patch"
+ echo
+ echo "You can accomplish this by executing:"
+        echo "$ sudo yum install gcc gcc-c++ gcc-gfortran make patch zip"
+ echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
+ fi
if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
then
echo "Looks like you're on an OpenSUSE-compatible machine."
@@ -566,16 +583,16 @@
CYTHON='Cython-0.19.1'
FORTHON='Forthon-0.8.11'
PYX='PyX-0.12.1'
-PYTHON='Python-2.7.5'
+PYTHON='Python-2.7.6'
BZLIB='bzip2-1.0.6'
FREETYPE_VER='freetype-2.4.12'
H5PY='h5py-2.1.3'
HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.0.0'
+IPYTHON='ipython-1.1.0'
LAPACK='lapack-3.4.2'
PNG=libpng-1.6.3
MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.7'
+MERCURIAL='mercurial-2.8'
NOSE='nose-1.3.0'
NUMPY='numpy-1.7.1'
PYTHON_HGLIB='python-hglib-1.0'
@@ -585,14 +602,14 @@
SQLITE='sqlite-autoconf-3071700'
SYMPY='sympy-0.7.3'
TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.3'
+ZEROMQ='zeromq-3.2.4'
ZLIB='zlib-1.2.8'
# Now we dump all our SHA512 files out.
echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79 Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
@@ -600,11 +617,11 @@
echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8 mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
@@ -614,7 +631,7 @@
echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6 zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -1006,10 +1023,7 @@
echo
echo "To get started with yt, check out the orientation:"
echo
- echo " http://yt-project.org/doc/orientation/"
- echo
- echo "or just activate your environment and run 'yt serve' to bring up the"
- echo "yt GUI."
+ echo " http://yt-project.org/doc/bootcamp/"
echo
echo "The source for yt is located at:"
echo " $YT_DIR"
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -25,7 +25,7 @@
from yt.convenience import \
load
from yt.data_objects.profiles import \
- BinnedProfile1D, EmptyProfileData
+ BinnedProfile1D, YTEmptyProfileData
from yt.analysis_modules.halo_finding.api import *
from .halo_filters import \
VirialFilter
@@ -588,7 +588,7 @@
profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
r_min, halo['r_max'],
log_space=True, end_collect=True)
- except EmptyProfileData:
+ except YTEmptyProfileData:
mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
return None
# Figure out which fields to add simultaneously
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
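The calling convention changes here: identify_contours now returns a contour count plus per-grid contour slices, and each clump becomes a cut_region selecting the cells whose 'Contours' value matches. A sketch of the consuming pattern (data, min_val, and max_val assumed defined):

    from yt.analysis_modules.level_sets.api import identify_contours

    nj, cids = identify_contours(data, "Density", min_val, max_val)
    clumps = []
    for cid in range(nj):
        # Contour ids are 1-based; each cut_region selects one connected set.
        clumps.append(data.cut_region(
            ["obj['Contours'] == %s" % (cid + 1)],
            {'contour_slices': cids}))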
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
+ tree = amr_utils.ContourTree()
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- tree = []
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
- grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
+ nid = node.node_id
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
+ total_contours += new_contours.shape[0]
+ tree.add_contours(new_contours)
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
+ node_ids = np.array(node_ids)
+ trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
+ joins = tree.export()
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff, final_joins)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
- pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ rv = dict()
+ rv.update(contour_ids)
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -250,6 +250,7 @@
hubble = getattr(pf, "hubble_constant", None)
omega_m = getattr(pf, "omega_matter", None)
omega_l = getattr(pf, "omega_lambda", None)
+ if hubble == 0: hubble = None
if hubble is not None and \
omega_m is not None and \
omega_l is not None:
@@ -948,9 +949,9 @@
col1 = pyfits.Column(name='ENERGY', format='E',
array=self["eobs"])
col2 = pyfits.Column(name='DEC', format='D',
+ array=self["ysky"])
+ col3 = pyfits.Column(name='RA', format='D',
array=self["xsky"])
- col3 = pyfits.Column(name='RA', format='D',
- array=self["ysky"])
coldefs = pyfits.ColDefs([col1, col2, col3])
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -19,7 +19,10 @@
import numpy as np
import os
-from yt.funcs import *
+from yt.funcs import \
+ download_file, \
+ mylog, \
+ only_on_root
from yt.data_objects.field_info_container import add_field
from yt.utilities.exceptions import YTException
@@ -31,6 +34,23 @@
xray_data_version = 1
+def _get_data_file():
+ data_file = "xray_emissivity.h5"
+ data_url = "http://yt-project.org/data"
+ if "YT_DEST" in os.environ and \
+ os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
+ data_dir = os.path.join(os.environ["YT_DEST"], "data")
+ else:
+ data_dir = "."
+ data_path = os.path.join(data_dir, data_file)
+ if not os.path.exists(data_path):
+ mylog.info("Attempting to download supplementary data from %s to %s." %
+ (data_url, data_dir))
+ fn = download_file(os.path.join(data_url, data_file), data_path)
+ if fn != data_path:
+ raise RuntimeError, "Failed to download supplementary data."
+ return data_path
+
class EnergyBoundsException(YTException):
def __init__(self, lower, upper):
self.lower = lower
@@ -65,8 +85,7 @@
default_filename = False
if filename is None:
- filename = os.path.join(os.environ["YT_DEST"],
- "data", "xray_emissivity.h5")
+ filename = _get_data_file()
default_filename = True
if not os.path.exists(filename):
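The new _get_data_file helper resolves the emissivity table in a fixed order: an existing $YT_DEST/data directory wins, otherwise the working directory, with a download on first use. The resolution step in isolation, as a runnable sketch:

    import os

    def resolve_data_path(data_file="xray_emissivity.h5"):
        # Prefer an existing $YT_DEST/data directory; fall back to cwd.
        yt_dest = os.environ.get("YT_DEST", "")
        data_dir = os.path.join(yt_dest, "data")
        if not (yt_dest and os.path.isdir(data_dir)):
            data_dir = "."
        return os.path.join(data_dir, data_file)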
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
#-----------------------------------------------------------------------------
from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.utilities.fits_image import FITSImageBuffer
from yt.data_objects.image_array import ImageArray
from yt.data_objects.field_info_container import add_field
from yt.funcs import fix_axis, mylog, iterable, get_pbar
from yt.utilities.definitions import inv_axis_names
-from yt.visualization.image_writer import write_fits, write_projection
from yt.visualization.volume_rendering.camera import off_axis_projection
from yt.utilities.parallel_tools.parallel_analysis_interface import \
communication_system, parallel_root_only
@@ -272,32 +272,52 @@
self.data["TeSZ"] = ImageArray(Te)
@parallel_root_only
- def write_fits(self, filename, clobber=True):
+ def write_fits(self, filename, sky_center=None, sky_scale=None, clobber=True):
r""" Export images to a FITS file. Writes the SZ distortion in all
specified frequencies as well as the mass-weighted temperature and the
- optical depth. Distance units are in kpc.
+ optical depth. Distance units are in kpc, unless *sky_center*
+ and *sky_scale* are specified.
Parameters
----------
filename : string
The name of the FITS file to be written.
+ sky_center : tuple of floats, optional
+ The center of the observation in (RA, Dec) in degrees. Only used if
+ converting to sky coordinates.
+ sky_scale : float, optional
+ Scale between degrees and kpc. Only used if
+ converting to sky coordinates.
clobber : boolean, optional
If the file already exists, do we overwrite?
Examples
--------
+ >>> # This example just writes out a FITS file with kpc coords
>>> szprj.write_fits("SZbullet.fits", clobber=False)
+ >>> # This example uses sky coords
+ >>> sky_scale = 1./3600. # One arcsec per kpc
+ >>> sky_center = (30., 45.) # In degrees
+ >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
"""
- coords = {}
- coords["dx"] = self.dx*self.pf.units["kpc"]
- coords["dy"] = self.dy*self.pf.units["kpc"]
- coords["xctr"] = 0.0
- coords["yctr"] = 0.0
- coords["units"] = "kpc"
- other_keys = {"Time" : self.pf.current_time}
- write_fits(self.data, filename, clobber=clobber, coords=coords,
- other_keys=other_keys)
+ deltas = np.array([self.dx*self.pf.units["kpc"],
+ self.dy*self.pf.units["kpc"]])
+
+ if sky_center is None:
+ center = [0.0]*2
+ units = "kpc"
+ else:
+ center = sky_center
+ units = "deg"
+ deltas *= sky_scale
+
+ fib = FITSImageBuffer(self.data, fields=self.data.keys(),
+ center=center, units=units,
+ scale=deltas)
+ fib.update_all_headers("Time", self.pf.current_time)
+ fib.writeto(filename, clobber=clobber)
+
@parallel_root_only
def write_png(self, filename_prefix, cmap_name="algae",
log_fields=None):
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
@@ -128,6 +130,20 @@
if simulation_type not in simulation_time_series_registry:
raise YTSimulationNotIdentified(simulation_type)
+ if os.path.exists(parameter_filename):
+ valid_file = True
+ elif os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"),
+ parameter_filename)):
+ parameter_filename = os.path.join(ytcfg.get("yt", "test_data_dir"),
+ parameter_filename)
+ valid_file = True
+ else:
+ valid_file = False
+
+ if not valid_file:
+ raise YTOutputNotIdentified((parameter_filename, simulation_type),
+ dict(find_outputs=find_outputs))
+
return simulation_time_series_registry[simulation_type](parameter_filename,
find_outputs=find_outputs)
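simulation() now validates its input the way load() does: the parameter file is accepted as given or relative to the configured test_data_dir, and anything else raises YTOutputNotIdentified up front. A sketch of that lookup (the ytcfg import assumed available as in yt.config):

    import os
    from yt.config import ytcfg

    def find_parameter_file(parameter_filename):
        if os.path.exists(parameter_filename):
            return parameter_filename
        candidate = os.path.join(ytcfg.get("yt", "test_data_dir"),
                                 parameter_filename)
        if os.path.exists(candidate):
            return candidate
        return None  # caller raises YTOutputNotIdentified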
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,12 @@
particle_handler_registry
from profiles import \
- EmptyProfileData, \
+ YTEmptyProfileData, \
BinnedProfile, \
BinnedProfile1D, \
BinnedProfile2D, \
- BinnedProfile3D
+ BinnedProfile3D, \
+ create_profile
from time_series import \
TimeSeriesData, \
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
@@ -308,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()#[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
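The chunk loop above now runs inside _field_parameter_state, a context manager added in yt/data_objects/data_containers.py below that temporarily swaps the source's field parameters and restores them afterwards. The shape of the idiom as a standalone sketch:

    from contextlib import contextmanager

    @contextmanager
    def field_parameter_state(obj, field_parameters):
        old = obj.field_parameters
        obj.field_parameters = field_parameters
        try:
            yield obj
        finally:
            # The version in the diff restores after a bare yield; the
            # try/finally here also restores if the body raises.
            obj.field_parameters = old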
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -351,6 +353,24 @@
else:
self.hierarchy.save_object(self, name)
+ def to_glue(self, fields, label="yt"):
+ """
+ Takes specific *fields* in the container and exports them to
+ Glue (http://www.glueviz.org) for interactive
+ analysis. Optionally add a *label*.
+ """
+ from glue.core import DataCollection, Data
+ from glue.core.coordinates import coordinates_from_header
+ from glue.qt.glue_application import GlueApplication
+
+ gdata = Data(label=label)
+ for component_name in fields:
+ gdata.add_component(self[component_name], component_name)
+ dc = DataCollection([gdata])
+
+ app = GlueApplication(dc)
+ app.start()
+
def __reduce__(self):
args = tuple([self.pf._hash(), self._type_name] +
[getattr(self, n) for n in self._con_args] +
@@ -365,6 +385,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
@@ -407,6 +434,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
@@ -751,11 +786,13 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return a YTCutRegionBase, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as a variable, but presently it
+ requires 'obj'.
Examples
--------
@@ -763,19 +800,12 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
@@ -966,12 +996,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +1012,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -988,10 +1019,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
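The new to_glue method hands selected fields to Glue for interactive exploration; it requires the optional glue package. A hedged usage sketch (dataset and label illustrative):

    from yt.mods import load

    pf = load("DD0010/moving7_0010")        # illustrative dataset
    sp = pf.h.sphere("max", (1.0, 'mpc'))
    sp.to_glue(["Density", "Temperature"], label="hot sphere")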
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -60,21 +60,21 @@
def _gradx(f, data):
grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
- grad /= 2.0*data["dx"].flat[0]
+ grad /= 2.0*data["dx"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
def _grady(f, data):
grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
- grad /= 2.0*data["dy"].flat[0]
+ grad /= 2.0*data["dy"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
def _gradz(f, data):
grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
- grad /= 2.0*data["dz"].flat[0]
+ grad /= 2.0*data["dz"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
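Multiplying each denominator by data.pf.units["cm"] converts the cell width from code units to centimeters, so the gradient comes out in field units per cm rather than per code length, i.e. grad = (f[i+1] - f[i-1]) / (2 * dx_code * cm_per_code). A worked one-dimensional sketch with an illustrative conversion factor:

    import numpy as np

    f = np.array([1.0, 2.0, 4.0, 7.0])  # field values along one axis
    dx_code = 0.25                      # cell width in code units
    cm_per_code = 3.0857e21             # illustrative: 1 code unit = 1 kpc

    # Central difference, converted to per-centimeter units.
    grad = (f[2:] - f[:-2]) / (2.0 * dx_code * cm_per_code)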
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -112,6 +112,10 @@
_domain_ind = None
+ def mask_refinement(self, selector):
+ mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
+ return mask
+
def select_blocks(self, selector):
mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
mask = self._reshape_vals(mask)
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -20,16 +20,10 @@
from yt.data_objects.data_containers import YTFieldData
from yt.utilities.lib import bin_profile1d, bin_profile2d, bin_profile3d
+from yt.utilities.lib import new_bin_profile1d, new_bin_profile2d, \
+ new_bin_profile3d
from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-
-_field_mapping = {
- "total_mass": ("CellMassMsun", "ParticleMassMsun"),
- "hybrid_radius": ("RadiusCode", "ParticleRadiusCode"),
- }
-
-class EmptyProfileData(Exception):
- pass
+ ParallelAnalysisInterface, parallel_objects
def preserve_source_parameters(func):
def save_state(*args, **kwargs):
@@ -55,7 +49,6 @@
self._data_source = data_source
self.pf = data_source.pf
self.field_data = YTFieldData()
- self._pdata = {}
@property
def hierarchy(self):
@@ -131,14 +124,11 @@
def __setitem__(self, key, value):
self.field_data[key] = value
- def _get_field(self, source, this_field, check_cut):
+ def _get_field(self, source, field, check_cut):
# This is where we will iterate to get all contributions to a field
# which is how we will implement hybrid particle/cell fields
# but... we default to just the field.
- data = []
- for field in _field_mapping.get(this_field, (this_field,)):
- data.append(source[field].astype('float64'))
- return np.concatenate(data, axis=0)
+ return source[field].astype('float64')
def _fix_pickle(self):
if isinstance(self._data_source, tuple):
@@ -225,7 +215,7 @@
def _get_bins(self, source, check_cut=False):
source_data = self._get_field(source, self.bin_field, check_cut)
if source_data.size == 0: # Nothing for us here.
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
# Truncate at boundaries.
if self.end_collect:
mi = np.ones_like(source_data).astype('bool')
@@ -234,7 +224,7 @@
& (source_data < self._bins.max()))
sd = source_data[mi]
if sd.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
# Stick the bins into our fixed bins, set at initialization
bin_indices = np.digitize(sd, self._bins)
if self.end_collect: #limit the range of values to 0 and n_bins-1
@@ -400,7 +390,7 @@
source_data_x = self._get_field(source, self.x_bin_field, check_cut)
source_data_y = self._get_field(source, self.y_bin_field, check_cut)
if source_data_x.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
if self.end_collect:
mi = np.arange(source_data_x.size)
@@ -412,7 +402,7 @@
sd_x = source_data_x[mi]
sd_y = source_data_y[mi]
if sd_x.size == 0 or sd_y.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -518,48 +508,6 @@
return np.log10(upper), np.log10(lower)
return upper, lower
-class BinnedProfile2DInlineCut(BinnedProfile2D):
- def __init__(self, data_source,
- x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
- y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
- end_collect=False):
- self.indices = data_source["Ones"].astype("bool")
- BinnedProfile2D.__init__(self, data_source,
- x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
- y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
- end_collect)
-
- @preserve_source_parameters
- def _bin_field(self, source, field, weight, accumulation,
- args, check_cut=False):
- source_data = self._get_field(source, field, check_cut)
- if weight: weight_data = self._get_field(source, weight, check_cut)
- else: weight_data = np.ones(source_data.shape, dtype='float64')
- self.total_stuff = source_data.sum()
- binned_field = self._get_empty_field()
- weight_field = self._get_empty_field()
- m_field = self._get_empty_field()
- q_field = self._get_empty_field()
- used_field = self._get_empty_field()
- mi = args[0]
- bin_indices_x = args[1][self.indices].ravel().astype('int64')
- bin_indices_y = args[2][self.indices].ravel().astype('int64')
- source_data = source_data[mi][self.indices]
- weight_data = weight_data[mi][self.indices]
- nx = bin_indices_x.size
- #mylog.debug("Binning %s / %s times", source_data.size, nx)
- bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
- weight_field, binned_field, m_field, q_field, used_field)
- if accumulation: # Fix for laziness
- if not iterable(accumulation):
- raise SyntaxError("Accumulation needs to have length 2")
- if accumulation[0]:
- binned_field = np.add.accumulate(binned_field, axis=0)
- if accumulation[1]:
- binned_field = np.add.accumulate(binned_field, axis=1)
- return binned_field, weight_field, used_field.astype('bool')
-
-
class BinnedProfile3D(BinnedProfile):
"""
A 'Profile' produces either a weighted (or unweighted) average
@@ -658,7 +606,7 @@
source_data_y = self._get_field(source, self.y_bin_field, check_cut)
source_data_z = self._get_field(source, self.z_bin_field, check_cut)
if source_data_x.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
if self.end_collect:
mi = np.arange(source_data_x.size)
else:
@@ -672,7 +620,7 @@
sd_y = source_data_y[mi]
sd_z = source_data_z[mi]
if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -790,25 +738,280 @@
self._data_source.hierarchy.save_data(values, "/Profiles", name,
set_attr, force=force)
-class StoredBinnedProfile3D(BinnedProfile3D):
- def __init__(self, pf, name):
- """
- Given a *pf* parameterfile and the *name* of a stored profile, retrieve
- it into a read-only data structure.
- """
+class ProfileFieldAccumulator(object):
+ def __init__(self, n_fields, size):
+ shape = size + (n_fields,)
+ self.values = np.zeros(shape, dtype="float64")
+ self.mvalues = np.zeros(shape, dtype="float64")
+ self.qvalues = np.zeros(shape, dtype="float64")
+ self.used = np.zeros(size, dtype='bool')
+ self.weight_values = np.zeros(size, dtype="float64")
+
+class ProfileND(ParallelAnalysisInterface):
+ def __init__(self, data_source, weight_field = None):
+ self.data_source = data_source
+ self.pf = data_source.pf
self.field_data = YTFieldData()
- prof_arr = pf.h.get_data("/Profiles", name)
- if prof_arr is None: raise KeyError("No such array")
- for ax in 'xyz':
- for base in ['%s_bin_field', '_%s_log']:
- setattr(self, base % ax, prof_arr.getAttr(base % ax))
- for ax in 'xyz':
- fn = getattr(self, '%s_bin_field' % ax)
- self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
- shape = prof_arr.getAttr('shape')
- for fn, fd in zip(prof_arr.getAttr('field_order'),
- prof_arr.read().transpose()):
- self.field_data[fn] = fd.reshape(shape)
+ self.weight_field = weight_field
- def add_fields(self, *args, **kwargs):
- raise RuntimeError("Sorry, you can't add to a stored profile.")
+ def add_fields(self, fields):
+ fields = ensure_list(fields)
+ temp_storage = ProfileFieldAccumulator(len(fields), self.size)
+ for g in parallel_objects(self.data_source._grids):
+ self._bin_grid(g, fields, temp_storage)
+ self._finalize_storage(fields, temp_storage)
+
+ def _finalize_storage(self, fields, temp_storage):
+ # We use our main comm here
+ # This also will fill _field_data
+ # FIXME: Add parallelism and combining std stuff
+ if self.weight_field is not None:
+ temp_storage.values /= temp_storage.weight_values[...,None]
+ blank = ~temp_storage.used
+ for i, field in enumerate(fields):
+ self.field_data[field] = temp_storage.values[...,i]
+ self.field_data[field][blank] = 0.0
+
+ def _bin_grid(self, grid, fields, storage):
+ raise NotImplementedError
+
+ def _filter(self, bin_fields, cut_points):
+ # cut_points is initially just the points inside our region
+ # we also want to apply a filtering based on min/max
+ filter = np.zeros(bin_fields[0].shape, dtype='bool')
+ filter[cut_points] = True
+ for (mi, ma), data in zip(self.bounds, bin_fields):
+ filter &= (data > mi)
+ filter &= (data < ma)
+ return filter, [data[filter] for data in bin_fields]
+
+ def _get_data(self, grid, fields):
+ # Save the values in the grid beforehand.
+ old_params = grid.field_parameters
+ old_keys = grid.field_data.keys()
+ grid.field_parameters = self.data_source.field_parameters
+ # Now we ask our source which values to include
+ pointI = self.data_source._get_point_indices(grid)
+ bin_fields = [grid[bf] for bf in self.bin_fields]
+ # We want to make sure that our fields are within the bounds of the
+ # binning
+ filter, bin_fields = self._filter(bin_fields, pointI)
+ if not np.any(filter): return None
+ arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
+ for i, field in enumerate(fields):
+ arr[:,i] = grid[field][filter]
+ if self.weight_field is not None:
+ weight_data = grid[self.weight_field]
+ else:
+ weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+ weight_data = weight_data[filter]
+ # So that we can pass these into
+ grid.field_parameters = old_params
+ grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
+ return arr, weight_data, bin_fields
+
+ def __getitem__(self, key):
+ return self.field_data[key]
+
+ def __iter__(self):
+ return iter(sorted(self.field_data.items()))
+
+ def _get_bins(self, mi, ma, n, take_log):
+ if take_log:
+ return np.logspace(np.log10(mi), np.log10(ma), n+1)
+ else:
+ return np.linspace(mi, ma, n+1)
+
+class Profile1D(ProfileND):
+ def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
+ weight_field = None):
+ super(Profile1D, self).__init__(data_source, weight_field)
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+
+ self.size = (self.x_bins.size - 1,)
+ self.bin_fields = (self.x_field,)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
+ self.x = self.x_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ gd = self._get_data(grid, fields)
+ if gd is None: return
+ fdata, wdata, (bf_x,) = gd
+ bin_ind = np.digitize(bf_x, self.x_bins) - 1
+ new_bin_profile1d(bin_ind, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+class Profile2D(ProfileND):
+ def __init__(self, data_source,
+ x_field, x_n, x_min, x_max, x_log,
+ y_field, y_n, y_min, y_max, y_log,
+ weight_field = None):
+ super(Profile2D, self).__init__(data_source, weight_field)
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+ self.y_field = y_field
+ self.y_log = y_log
+ self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+
+ self.size = (self.x_bins.size - 1, self.y_bins.size - 1)
+
+ self.bin_fields = (self.x_field, self.y_field)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+ (self.y_bins[0], self.y_bins[-1]))
+ self.x = self.x_bins
+ self.y = self.y_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ rv = self._get_data(grid, fields)
+ if rv is None: return
+ fdata, wdata, (bf_x, bf_y) = rv
+ bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+ bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+ new_bin_profile2d(bin_ind_x, bin_ind_y, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+class Profile3D(ProfileND):
+ def __init__(self, data_source,
+ x_field, x_n, x_min, x_max, x_log,
+ y_field, y_n, y_min, y_max, y_log,
+ z_field, z_n, z_min, z_max, z_log,
+ weight_field = None):
+ super(Profile3D, self).__init__(data_source, weight_field)
+ # X
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+ # Y
+ self.y_field = y_field
+ self.y_log = y_log
+ self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+ # Z
+ self.z_field = z_field
+ self.z_log = z_log
+ self.z_bins = self._get_bins(z_min, z_max, z_n, z_log)
+
+ self.size = (self.x_bins.size - 1,
+ self.y_bins.size - 1,
+ self.z_bins.size - 1)
+
+ self.bin_fields = (self.x_field, self.y_field, self.z_field)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+ (self.y_bins[0], self.y_bins[-1]),
+ (self.z_bins[0], self.z_bins[-1]))
+
+ self.x = self.x_bins
+ self.y = self.y_bins
+ self.z = self.z_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ rv = self._get_data(grid, fields)
+ if rv is None: return
+ fdata, wdata, (bf_x, bf_y, bf_z) = rv
+ bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+ bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+ bin_ind_z = np.digitize(bf_z, self.z_bins) - 1
+ new_bin_profile3d(bin_ind_x, bin_ind_y, bin_ind_z, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+def create_profile(data_source, bin_fields, n = 64,
+ weight_field = "CellMass", fields = None,
+ accumulation = False, fractional = False):
+ r"""
+ Create a 1, 2, or 3D profile object.
+
+ The dimensionality of the profile object is chosen by the number of
+ fields given in the bin_fields argument.
+
+ Parameters
+ ----------
+ data_source : AMR3DData Object
+ The data object to be profiled.
+ bin_fields : list of strings
+ List of the binning fields for profiling.
+ n : int or list of ints
+ The number of bins in each dimension. If None, 64 bins are
+ used for each bin field.
+ Default: 64.
+ weight_field : str
+ The weight field for computing weighted average for the profile
+ values. If None, the profile values are sums of the data in
+ each bin.
+ fields : list of strings
+ The fields to be profiled.
+ accumulation : bool or list of bools
+ If True, the profile values for a bin n are the cumulative sum of
+ all the values from bin 0 to n. If a negative value is given, the
+ sum is reversed so that the value for bin n is the cumulative sum
+ from bin N (the total number of bins) to n. If the profile is 2D
+ or 3D, a list of values can be given to
+ control the summation in each dimension independently.
+ Default: False.
+ fractional : bool
+ If True, the profile values are divided by the sum of all
+ the profile data such that the profile represents a probability
+ distribution function.
+
+ Examples
+ --------
+
+ Create a 1d profile. Access bin field from profile.x and field
+ data from profile.field_data.
+
+ >>> pf = load("DD0046/DD0046")
+ >>> ad = pf.h.all_data()
+ >>> profile = create_profile(ad, ["Density"],
+ ... fields=["Temperature", "x-velocity"])
+ >>> print profile.x
+ >>> print profile.field_data["Temperature"]
+
+ """
+ if len(bin_fields) == 1:
+ cls = Profile1D
+ elif len(bin_fields) == 2:
+ cls = Profile2D
+ elif len(bin_fields) == 3:
+ cls = Profile3D
+ else:
+ raise NotImplementedError
+ if not iterable(n):
+ n = [n] * len(bin_fields)
+ if not iterable(accumulation):
+ accumulation = [accumulation] * len(bin_fields)
+ logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+ ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
+ for f, l in zip(bin_fields, logs)]
+ args = [data_source]
+ for f, n, (mi, ma), l in zip(bin_fields, n, ex, logs):
+ args += [f, n, mi, ma, l]
+ obj = cls(*args, weight_field = weight_field)
+ setattr(obj, "accumulation", accumulation)
+ setattr(obj, "fractional", fractional)
+ if fields is not None:
+ obj.add_fields(fields)
+ for field in fields:
+ if fractional:
+ obj.field_data[field] /= obj.field_data[field].sum()
+ for axis, acc in enumerate(accumulation):
+ if not acc: continue
+ temp = obj.field_data[field]
+ temp = np.rollaxis(temp, axis)
+ if acc < 0:
+ temp = temp[::-1]
+ temp = temp.cumsum(axis=0)
+ if acc < 0:
+ temp = temp[::-1]
+ temp = np.rollaxis(temp, axis)
+ obj.field_data[field] = temp
+
+ return obj
+
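In create_profile, fractional and accumulation are applied as post-processing: fractional normalizes each field to unit sum, and a negative accumulation cumulates from the last bin backwards. A hedged usage sketch building a reverse-cumulative mass distribution (dataset name taken from the docstring example):

    from yt.mods import load
    from yt.data_objects.profiles import create_profile

    pf = load("DD0046/DD0046")
    ad = pf.h.all_data()
    # Reverse-cumulative, normalized mass distribution versus Density.
    prof = create_profile(ad, ["Density"], fields=["CellMassMsun"],
                          weight_field=None, accumulation=-1,
                          fractional=True)
    print prof.x
    print prof.field_data["CellMassMsun"]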
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+ """
+ _type_name = "cut_region"
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
+ field_parameters = None):
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do no data IO of our own.
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
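The conditionals are ordinary Python expressions evaluated with the base object bound to the name 'obj', and the resulting boolean masks are ANDed together (this is what _cond_ind does). A minimal sketch of that combination step, where the dict below is a hypothetical stand-in for a real data object:

    import numpy as np

    def combine_cuts(obj, conditionals):
        # Each conditional must evaluate to a boolean array over the
        # cells of 'obj'; the intersection of all cuts is returned.
        ind = None
        for cond in conditionals:
            res = eval(cond)
            ind = res if ind is None else np.logical_and(ind, res)
        return ind

    obj = {"temperature": np.array([500.0, 2.0e3, 800.0])}
    mask = combine_cuts(obj, ["obj['temperature'] < 1e3"])
    # mask -> array([ True, False,  True])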
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
+ # Now we can test some projections
dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,10 +1,11 @@
from yt.testing import *
from yt.data_objects.profiles import \
- BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+ BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+ Profile1D, Profile2D, Profile3D
_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
-def test_profiles():
+def test_binned_profiles():
pf = fake_random_pf(64, nprocs = 8, fields = _fields)
nv = pf.domain_dimensions.prod()
dd = pf.h.all_data()
@@ -71,3 +72,83 @@
p3d.add_fields(["Ones"], weight="Temperature")
yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+def test_profiles():
+ pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+ nv = pf.domain_dimensions.prod()
+ dd = pf.h.all_data()
+ (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+ ["Density", "Temperature", "Dinosaurs"])
+ rt, tt, dt = dd.quantities["TotalQuantity"](
+ ["Density", "Temperature", "Dinosaurs"])
+ # First we profile the fields over their (slightly padded) extrema.
+ e1, e2 = 0.9, 1.1
+ for nb in [8, 16, 32, 64]:
+ # We log all the fields or don't log 'em all. No need to do them
+ # individually.
+ for lf in [True, False]:
+ p1d = Profile1D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ weight_field = None)
+ p1d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p1d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+ p2d = Profile2D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ "Temperature", nb, tmi*e1, tma*e2, lf,
+ weight_field = None)
+ p2d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p2d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+ p3d = Profile3D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ "Temperature", nb, tmi*e1, tma*e2, lf,
+ "Dinosaurs", nb, dmi*e1, dma*e2, lf,
+ weight_field = None)
+ p3d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p3d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+ p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p1d.add_fields("Ones")
+ av = nv / nb
+ yield assert_equal, p1d["Ones"], np.ones(nb)*av
+
+ # We re-bin ones with a weight now
+ p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p1d.add_fields(["Ones"])
+ yield assert_equal, p1d["Ones"], np.ones(nb)
+
+ p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p2d.add_fields("Ones")
+ av = nv / nb**2
+ yield assert_equal, p2d["Ones"], np.ones((nb, nb))*av
+
+ # We re-bin ones with a weight now
+ p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p2d.add_fields(["Ones"])
+ yield assert_equal, p2d["Ones"], np.ones((nb, nb))
+
+ p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ "z", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p3d.add_fields("Ones")
+ av = nv / nb**3
+ yield assert_equal, p3d["Ones"], np.ones((nb, nb, nb))*av
+
+ # We re-bin ones with a weight now
+ p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ "z", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p3d.add_fields(["Ones"])
+ yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
+
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -90,9 +90,13 @@
... SlicePlot(pf, "x", "Density").save()
"""
- def __init__(self, outputs, parallel = True ,**kwargs):
+ def __init__(self, outputs, parallel = True, setup_function = None,
+ **kwargs):
self.tasks = AnalysisTaskProxy(self)
self.params = TimeSeriesParametersContainer(self)
+ if setup_function is None:
+ setup_function = lambda a: None
+ self._setup_function = setup_function
self._pre_outputs = outputs[:]
for type_name in data_object_registry:
setattr(self, type_name, functools.partial(
@@ -104,7 +108,9 @@
# We can make this fancier, but this works
for o in self._pre_outputs:
if isinstance(o, types.StringTypes):
- yield load(o,**self.kwargs)
+ pf = load(o, **self.kwargs)
+ self._setup_function(pf)
+ yield pf
else:
yield o
@@ -116,7 +122,8 @@
return TimeSeriesData(self._pre_outputs[key], self.parallel)
o = self._pre_outputs[key]
if isinstance(o, types.StringTypes):
- o = load(o,**self.kwargs)
+ o = load(o, **self.kwargs)
+ self._setup_function(o)
return o
def __len__(self):
@@ -163,7 +170,12 @@
This demonstrates how one might store results:
- >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
+ >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy",
+ ... setup_function = print_time )
+ ...
>>> my_storage = {}
>>> for sto, pf in ts.piter(storage=my_storage):
... v, c = pf.h.find_max("Density")
@@ -215,7 +227,8 @@
return [v for k, v in sorted(return_values.items())]
@classmethod
- def from_filenames(cls, filenames, parallel = True, **kwargs):
+ def from_filenames(cls, filenames, parallel = True, setup_function = None,
+ **kwargs):
r"""Create a time series from either a filename pattern or a list of
filenames.
@@ -239,12 +252,19 @@
this is set to either True or an integer, it will be iterated with
1 or that integer number of processors assigned to each parameter
file provided to the loop.
+ setup_function : callable, accepts a pf
+ This function will be called whenever a parameter file is loaded.
Examples
--------
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
>>> ts = TimeSeriesData.from_filenames(
- "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0")
+ ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
+ ... setup_function = print_time)
+ ...
>>> for pf in ts:
... SlicePlot(pf, "x", "Density").save()
@@ -262,7 +282,8 @@
else:
filenames = glob.glob(filenames)
filenames.sort()
- obj = cls(filenames[:], parallel = parallel, **kwargs)
+ obj = cls(filenames[:], parallel = parallel,
+ setup_function = setup_function, **kwargs)
return obj
@classmethod
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/extern/progressbar/progressbar.py
--- a/yt/extern/progressbar/progressbar.py
+++ b/yt/extern/progressbar/progressbar.py
@@ -410,7 +410,9 @@
from IPython.display import Javascript, display
# First delete the node that held the progress bar from the page
js = """var element = document.getElementById('%s');
- element.parentNode.removeChild(element);""" % self.uuid
+ var parent = element.parentNode;
+ parent.removeChild(element);
+ parent.parentElement.remove();""" % self.uuid
display(Javascript(js))
# Then also remove its trace from the cell output (so it doesn't get
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -459,3 +459,23 @@
validators=[ValidateParameter("normal"),
ValidateParameter("center")])
+
+def add_particle_average(registry, ptype, field_name,
+ weight = "particle_mass",
+ density = True):
+ def _pfunc_avg(field, data):
+ pos = data[ptype, "Coordinates"]
+ f = data[ptype, field_name]
+ wf = data[ptype, weight]
+ f *= wf
+ v = data.deposit(pos, [f], method = "sum")
+ w = data.deposit(pos, [wf], method = "sum")
+ v /= w
+ if density: v /= data["CellVolume"]
+ v[np.isnan(v)] = 0.0
+ return v
+ fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
+ registry.add_field(fn, function=_pfunc_avg,
+ validators = [ValidateSpatial(0)],
+ particle_type = False)
+ return fn
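Since add_particle_average returns the ("deposit", ...) tuple it registered, callers can immediately request the new mesh field. A hedged usage sketch, assuming the particle type carries a "temperature" field (the field name here is illustrative):

    # Register a mass-weighted average of particle temperature,
    # deposited onto the mesh as a non-particle field.
    fn = add_particle_average(registry, "io", "temperature",
                              weight="particle_mass", density=False)
    # fn == ("deposit", "io_avg_temperature"); any data object can now
    # request it, e.g. ad[fn].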
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ projection_conversion="1",
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
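The reworked _Contours field above paints contour ids into an array of -1s, driven by a "contour_slices" field parameter mapping a grid id to (slice, values) pairs; this is the payload the new contour finder produces. A small sketch of the fill pattern with hypothetical data:

    import numpy as np

    # One (slice, values) pair per contour region found in grid id 0.
    vals = -np.ones((4, 4, 4))
    contour_slices = {0: [((slice(0, 2), slice(0, 2), slice(0, 2)),
                           np.ones((2, 2, 2)))]}
    for sl, v in contour_slices.get(0, []):
        vals[sl] = v
    # Cells inside the contour now hold its id; all others remain -1.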
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -26,6 +26,8 @@
StaticOutput
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+ get_box_grids_level
from .fields import AthenaFieldInfo, KnownAthenaFields
from yt.data_objects.field_info_container import \
@@ -109,7 +111,7 @@
self.hierarchy_filename = self.parameter_file.filename
#self.directory = os.path.dirname(self.hierarchy_filename)
self._fhandle = file(self.hierarchy_filename,'rb')
- AMRHierarchy.__init__(self, pf, data_style)
+ GridGeometryHandler.__init__(self, pf, data_style)
self._fhandle.close()
@@ -161,7 +163,7 @@
def _setup_classes(self):
dd = self._get_data_reader_dict()
- AMRHierarchy._setup_classes(self, dd)
+ GridGeometryHandler._setup_classes(self, dd)
self.object_types.sort()
def _count_grids(self):
@@ -264,7 +266,7 @@
# know the extent of all the grids.
glis = np.round((glis - self.parameter_file.domain_left_edge)/gdds).astype('int')
new_dre = np.max(gres,axis=0)
- self.parameter_file.domain_right_edge = np.round(new_dre, decimals=6)
+ self.parameter_file.domain_right_edge = np.round(new_dre, decimals=12)
self.parameter_file.domain_width = \
(self.parameter_file.domain_right_edge -
self.parameter_file.domain_left_edge)
@@ -292,9 +294,9 @@
dxs.append(dx)
dx = np.array(dxs)
- self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
+ self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=12)
self.grid_dimensions = gdims.astype("int32")
- self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+ self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=12)
if self.parameter_file.dimensionality <= 2:
self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
if self.parameter_file.dimensionality == 1:
@@ -305,13 +307,33 @@
for g in self.grids:
g._prepare_grid()
g._setup_dx()
+ self._reconstruct_parent_child()
+ """
for g in self.grids:
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
+ """
self.max_level = self.grid_levels.max()
+ def _reconstruct_parent_child(self):
+ mask = np.empty(len(self.grids), dtype='int32')
+ mylog.debug("First pass; identifying child grids")
+ for i, grid in enumerate(self.grids):
+ get_box_grids_level(self.grid_left_edge[i,:],
+ self.grid_right_edge[i,:],
+ self.grid_levels[i] + 1,
+ self.grid_left_edge, self.grid_right_edge,
+ self.grid_levels, mask)
+ #ids = np.where(mask.astype("bool")) # where is a tuple
+ #mask[ids] = True
+ grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
+ mylog.debug("Second pass; identifying parents")
+ for i, grid in enumerate(self.grids): # Second pass
+ for child in grid.Children:
+ child.Parent.append(grid)
+
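_reconstruct_parent_child identifies, for each grid, the grids one level finer whose bounding boxes overlap it. A pure-NumPy sketch of the same overlap test that get_box_grids_level performs; left_edges and right_edges are hypothetical (ngrids, 3) arrays and levels an (ngrids,) array:

    import numpy as np

    def children_of(i, left_edges, right_edges, levels):
        # Grid j is a candidate child of grid i when it sits exactly one
        # level finer and its bounding box overlaps grid i's box.
        overlap = np.all((left_edges < right_edges[i]) &
                         (right_edges > left_edges[i]), axis=1)
        return np.where(overlap & (levels == levels[i] + 1))[0]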
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -78,7 +78,7 @@
def _convertDensity(data) :
return data.convert("Density")
def _density(field, data) :
- return data["density"]
+ return data["density"].copy()
add_field("Density", function=_density, take_log=False,
units=r"\rm{g}/\rm{cm}^3", projected_units=r"\rm{g}/\rm{cm}^2",
convert_function=_convertDensity)
@@ -87,21 +87,21 @@
return data.convert("x-velocity")
def _xvelocity(field, data):
if "velocity_x" in data.pf.field_info:
- return data["velocity_x"]
+ return data["velocity_x"].copy()
else:
return data["momentum_x"]/data["density"]
add_field("x-velocity", function=_xvelocity, take_log=False,
units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
def _yvelocity(field, data):
if "velocity_y" in data.pf.field_info:
- return data["velocity_y"]
+ return data["velocity_y"].copy()
else:
return data["momentum_y"]/data["density"]
add_field("y-velocity", function=_yvelocity, take_log=False,
units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
def _zvelocity(field, data):
if "velocity_z" in data.pf.field_info:
- return data["velocity_z"]
+ return data["velocity_z"].copy()
else:
return data["momentum_z"]/data["density"]
add_field("z-velocity", function=_zvelocity, take_log=False,
@@ -128,7 +128,7 @@
return data.convert("Density")*data.convert("x-velocity")**2
def _pressure(field, data) :
if "pressure" in data.pf.field_info:
- return data["pressure"]
+ return data["pressure"].copy()
else:
eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
data["momentum_y"]**2 +
@@ -152,19 +152,19 @@
units=r"\rm{K}")
def _convertBfield(data):
- return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
+ return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
def _Bx(field, data):
- return data['cell_centered_B_x']
+ return data['cell_centered_B_x'].copy()
add_field("Bx", function=_Bx, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_x",
convert_function=_convertBfield)
def _By(field, data):
- return data['cell_centered_B_y']
+ return data['cell_centered_B_y'].copy()
add_field("By", function=_By, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_y",
convert_function=_convertBfield)
def _Bz(field, data):
- return data['cell_centered_B_z']
+ return data['cell_centered_B_z'].copy()
add_field("Bz", function=_Bz, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_z",
convert_function=_convertBfield)
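The .copy() calls added throughout this file guard against aliasing: if a derived field returns the cached on-disk array itself and a conversion (or any downstream consumer) then modifies the result in place, the cached raw data is silently corrupted and re-scaled on every access. A tiny illustration of the hazard, independent of yt:

    import numpy as np

    cache = {"density": np.array([1.0, 2.0])}

    def density_view(data):
        return data["density"]      # returns the cached array itself

    v = density_view(cache)
    v *= 10.0                       # an in-place unit conversion...
    # cache["density"] is now [10., 20.]: the raw data was mutated.
    # Returning data["density"].copy() keeps the cache intact.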
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -15,6 +15,7 @@
from yt.utilities.io_handler import \
BaseIOHandler
import numpy as np
+from yt.funcs import mylog, defaultdict
class IOHandlerAthena(BaseIOHandler):
_data_style = "athena"
@@ -30,35 +31,45 @@
def _read_field_names(self,grid):
pass
- def _read_data(self,grid,field):
- f = file(grid.filename, 'rb')
- dtype, offsetr = grid.hierarchy._field_map[field]
- grid_ncells = np.prod(grid.ActiveDimensions)
- grid_dims = grid.ActiveDimensions
- grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
- read_table_offset = get_read_table_offset(f)
- if grid_ncells != grid0_ncells:
- offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
- if grid_ncells == grid0_ncells:
- offset = offsetr
- f.seek(read_table_offset+offset)
- if dtype == 'scalar':
- data = np.fromfile(f, dtype='>f4',
- count=grid_ncells).reshape(grid_dims,order='F').copy()
- if dtype == 'vector':
- data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
- if '_x' in field:
- data = data[0::3].reshape(grid_dims,order='F').copy()
- elif '_y' in field:
- data = data[1::3].reshape(grid_dims,order='F').copy()
- elif '_z' in field:
- data = data[2::3].reshape(grid_dims,order='F').copy()
- f.close()
- if grid.pf.field_ordering == 1:
- return data.T.astype("float64")
- else:
- return data.astype("float64")
-
+ def _read_chunk_data(self,chunk,fields):
+ data = {}
+ grids_by_file = defaultdict(list)
+ if len(chunk.objs) == 0: return data
+ field_list = set(f[1] for f in fields)
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ f = open(grid.filename, "rb")
+ data[grid.id] = {}
+ grid_ncells = np.prod(grid.ActiveDimensions)
+ grid_dims = grid.ActiveDimensions
+ grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
+ read_table_offset = get_read_table_offset(f)
+ for field in self.pf.h.field_list:
+ dtype, offsetr = grid.hierarchy._field_map[field]
+ if grid_ncells != grid0_ncells:
+ offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+ if grid_ncells == grid0_ncells:
+ offset = offsetr
+ f.seek(read_table_offset+offset)
+ if dtype == 'scalar':
+ v = np.fromfile(f, dtype='>f4',
+ count=grid_ncells).reshape(grid_dims,order='F').copy()
+ if dtype == 'vector':
+ v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+ if '_x' in field:
+ v = v[0::3].reshape(grid_dims,order='F').copy()
+ elif '_y' in field:
+ v = v[1::3].reshape(grid_dims,order='F').copy()
+ elif '_z' in field:
+ v = v[2::3].reshape(grid_dims,order='F').copy()
+ if grid.pf.field_ordering == 1:
+ data[grid.id][field] = v.T.astype("float64")
+ else:
+ data[grid.id][field] = v.astype("float64")
+ f.close()
+ return data
+
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
@@ -66,6 +77,27 @@
sl.reverse()
return self._read_data_set(grid, field)[sl]
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ chunks = list(chunks)
+ if any((ftype != "gas" for ftype, fname in fields)):
+ raise NotImplementedError
+ rv = {}
+ for field in fields:
+ rv[field] = np.empty(size, dtype="float64")
+ ng = sum(len(c.objs) for c in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s grids",
+ size, [f2 for f1, f2 in fields], ng)
+ ind = 0
+ for chunk in chunks:
+ data = self._read_chunk_data(chunk, fields)
+ for g in chunk.objs:
+ for field in fields:
+ ftype, fname = field
+ ds = data[g.id].pop(fname)
+ nd = g.select(selector, ds, rv[field], ind) # caches
+ ind += nd
+ data.pop(g.id)
+ return rv
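_read_fluid_selection follows the chunked IO pattern used across the yt-3.0 frontends: preallocate one flat array per field, then let each grid copy its selected cells in at a running offset. A simplified, runnable analogue with plain arrays (grids and mask_for are hypothetical stand-ins for the chunk objects and the selector):

    import numpy as np

    def read_selection(grids, mask_for, size):
        # grids: iterable of (grid_id, data_array); mask_for(gid) gives
        # the boolean selection mask for that grid.
        rv = np.empty(size, dtype="float64")
        ind = 0
        for gid, data in grids:
            sel = mask_for(gid)
            n = sel.sum()
            rv[ind:ind + n] = data[sel]
            ind += n
        return rv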
def get_read_table_offset(f):
line = f.readline()
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 876b17286d9b2d349eb8835bd0c46af3ad0c8bfd yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -842,16 +842,22 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.particle_types = ["io"]
+ self.particle_types = []
if self.parameters["NumberOfParticles"] > 0 and \
"AppendActiveParticleType" in self.parameters.keys():
# If this is the case, then we know we should have a DarkMatter
# particle type, and we don't need the "io" type.
- self.particle_types = ["DarkMatter"]
self.parameters["AppendActiveParticleType"].append("DarkMatter")
+ else:
+ # We do not have an "io" type for Enzo particles if the
+ # ActiveParticle machinery is on, as we simply will ignore any of
+ # the non-DarkMatter particles in that case. However, for older
+ # datasets, we call this particle type "io".
+ self.particle_types = ["io"]
for ptype in self.parameters.get("AppendActiveParticleType", []):
self.particle_types.append(ptype)
self.particle_types = tuple(self.particle_types)
+ self.particle_types_raw = self.particle_types
if self.dimensionality == 1:
self._setup_1d()
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/43e5225bdbf3/
Changeset: 43e5225bdbf3
Branch: yt-3.0
User: jzuhone
Date: 2013-12-19 08:01:21
Summary: Check for system type when including alloca
Affected #: 4 files
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -18,8 +18,12 @@
from libc.string cimport memcpy
import data_structures
-cdef extern from "malloc.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef extern from "artio.h":
ctypedef struct artio_fileset_handle "artio_fileset" :
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -23,8 +23,12 @@
from fp_utils cimport *
from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
-cdef extern from "malloc.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef inline int gind(int i, int j, int k, int dims[3]):
# The ordering is such that we want i to vary the slowest in this instance,
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,8 +24,12 @@
from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
from .particle_deposit cimport sph_kernel, gind
-cdef extern from "malloc.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef struct NeighborList
cdef struct NeighborList:
diff -r e0880d58fb3ac5e3d938e9e9e9185feced20e1b3 -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -29,8 +29,12 @@
void FIX_LONG( unsigned )
void FIX_FLOAT( float )
-cdef extern from "malloc.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef extern from "stdio.h":
cdef int SEEK_SET
https://bitbucket.org/yt_analysis/yt/commits/24d5c142678f/
Changeset: 24d5c142678f
Branch: yt-3.0
User: jzuhone
Date: 2013-12-19 08:01:46
Summary: Merging
Affected #: 73 files
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -264,28 +264,45 @@
echo "Alternatively, download the Xcode command line tools from"
echo "the Apple developer tools website."
echo
- echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+ echo "OS X 10.8.4 and 10.9: download Xcode 5.02 from the mac app store."
echo "(search for Xcode)."
+ echo
echo "Additionally, you will have to manually install the Xcode"
- echo "command line tools, see:"
- echo "http://stackoverflow.com/questions/9353444"
- echo "Alternatively, download the Xcode command line tools from"
- echo "the Apple developer tools website."
+ echo "command line tools."
+ echo
+ echo "For OS X 10.8, see:"
+ echo "http://stackoverflow.com/questions/9353444"
echo
- echo "NOTE: It's possible that the installation will fail, if so,"
- echo "please set the following environment variables, remove any"
- echo "broken installation tree, and re-run this script verbatim."
- echo
- echo "$ export CC=gcc"
- echo "$ export CXX=g++"
- echo
- OSX_VERSION=`sw_vers -productVersion`
- if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ echo "For OS X 10.9, the command line tools can be installed"
+ echo "with the following command:"
+ echo " xcode-select --install"
+ echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
then
MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
fi
fi
+ if [ -f /etc/redhat-release ]
+ then
+ echo "Looks like you're on an Redhat-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * openssl-devel"
+ echo " * uuid-devel"
+ echo " * readline-devel"
+ echo " * ncurses-devel"
+ echo " * zip"
+ echo " * gcc-{,c++,gfortran}"
+ echo " * make"
+ echo " * patch"
+ echo
+ echo "You can accomplish this by executing:"
+ echo "$ sudo yum install gcc gcc-g++ gcc-gfortran make patch zip"
+ echo "$ sudo yum install ncurses-devel uuid-devel openssl-devel readline-devel"
+ fi
if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
then
echo "Looks like you're on an OpenSUSE-compatible machine."
@@ -566,16 +583,16 @@
CYTHON='Cython-0.19.1'
FORTHON='Forthon-0.8.11'
PYX='PyX-0.12.1'
-PYTHON='Python-2.7.5'
+PYTHON='Python-2.7.6'
BZLIB='bzip2-1.0.6'
FREETYPE_VER='freetype-2.4.12'
H5PY='h5py-2.1.3'
HDF5='hdf5-1.8.11'
-IPYTHON='ipython-1.0.0'
+IPYTHON='ipython-1.1.0'
LAPACK='lapack-3.4.2'
PNG=libpng-1.6.3
MATPLOTLIB='matplotlib-1.3.0'
-MERCURIAL='mercurial-2.7'
+MERCURIAL='mercurial-2.8'
NOSE='nose-1.3.0'
NUMPY='numpy-1.7.1'
PYTHON_HGLIB='python-hglib-1.0'
@@ -585,14 +602,14 @@
SQLITE='sqlite-autoconf-3071700'
SYMPY='sympy-0.7.3'
TORNADO='tornado-3.1'
-ZEROMQ='zeromq-3.2.3'
+ZEROMQ='zeromq-3.2.4'
ZLIB='zlib-1.2.8'
# Now we dump all our SHA512 files out.
echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '3df0ba4b1cfef5f02fb27925de4c2ca414eca9000af6a3d475d39063720afe987287c3d51377e0a36b88015573ef699f700782e1749c7a357b8390971d858a79 Python-2.7.6.tgz' > Python-2.7.6.tgz.sha512
echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
@@ -600,11 +617,11 @@
echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
-echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '46b8ae25df2ced674b3b3629070aafac955ba3aa2a5e749f8e63ef1f459126e1c4a9a03661406151622590a90c73b527716ad71bc626f57f52b51abfae0f43ca ipython-1.1.0.tar.gz' > ipython-1.1.0.tar.gz.sha512
echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
-echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'b08dcd746728d89f1f96036f39df1608fad0ff863ae48fe12424b1645936ebbf59b9068b93fe3c7cfd2036db046df3dc814119f89a827bd5f008d32f323d45a8 mercurial-2.8.tar.gz' > mercurial-2.8.tar.gz.sha512
echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
@@ -614,7 +631,7 @@
echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
-echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'd8eef84860bc5314b42a2cc210340572a9148e008ea65f7650844d0edbe457d6758785047c2770399607f69ba3b3a544db9775a5cdf961223f7e278ef7e0f5c6 zeromq-3.2.4.tar.gz' > zeromq-3.2.4.tar.gz.sha512
echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
@@ -1006,10 +1023,7 @@
echo
echo "To get started with yt, check out the orientation:"
echo
- echo " http://yt-project.org/doc/orientation/"
- echo
- echo "or just activate your environment and run 'yt serve' to bring up the"
- echo "yt GUI."
+ echo " http://yt-project.org/doc/bootcamp/"
echo
echo "The source for yt is located at:"
echo " $YT_DIR"
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/halo_profiler/multi_halo_profiler.py
--- a/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
+++ b/yt/analysis_modules/halo_profiler/multi_halo_profiler.py
@@ -25,7 +25,7 @@
from yt.convenience import \
load
from yt.data_objects.profiles import \
- BinnedProfile1D, EmptyProfileData
+ BinnedProfile1D, YTEmptyProfileData
from yt.analysis_modules.halo_finding.api import *
from .halo_filters import \
VirialFilter
@@ -588,7 +588,7 @@
profile = BinnedProfile1D(sphere, self.n_profile_bins, "RadiusMpc",
r_min, halo['r_max'],
log_space=True, end_collect=True)
- except EmptyProfileData:
+ except YTEmptyProfileData:
mylog.error("Caught EmptyProfileData exception, returning None for this halo.")
return None
# Figure out which fields to add simultaneously
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/level_sets/api.py
--- a/yt/analysis_modules/level_sets/api.py
+++ b/yt/analysis_modules/level_sets/api.py
@@ -14,7 +14,6 @@
#-----------------------------------------------------------------------------
from .contour_finder import \
- coalesce_join_tree, \
identify_contours
from .clump_handling import \
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/level_sets/clump_handling.py
--- a/yt/analysis_modules/level_sets/clump_handling.py
+++ b/yt/analysis_modules/level_sets/clump_handling.py
@@ -107,10 +107,11 @@
print "Wiping out existing children clumps."
self.children = []
if max_val is None: max_val = self.max_val
- contour_info = identify_contours(self.data, self.field, min_val, max_val,
- self.cached_fields)
- for cid in contour_info:
- new_clump = self.data.extract_region(contour_info[cid])
+ nj, cids = identify_contours(self.data, self.field, min_val, max_val)
+ for cid in range(nj):
+ new_clump = self.data.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
self.children.append(Clump(new_clump, self, self.field,
self.cached_fields,function=self.function,
clump_info=self.clump_info))
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/level_sets/contour_finder.py
--- a/yt/analysis_modules/level_sets/contour_finder.py
+++ b/yt/analysis_modules/level_sets/contour_finder.py
@@ -20,120 +20,52 @@
import yt.utilities.data_point_utilities as data_point_utilities
import yt.utilities.lib as amr_utils
-def coalesce_join_tree(jtree1):
- joins = defaultdict(set)
- nj = jtree1.shape[0]
- for i1 in range(nj):
- current_new = jtree1[i1, 0]
- current_old = jtree1[i1, 1]
- for i2 in range(nj):
- if jtree1[i2, 1] == current_new:
- current_new = max(current_new, jtree1[i2, 0])
- jtree1[i1, 0] = current_new
- for i1 in range(nj):
- joins[jtree1[i1, 0]].update([jtree1[i1, 1], jtree1[i1, 0]])
- updated = -1
- while updated != 0:
- keys = list(reversed(sorted(joins.keys())))
- updated = 0
- for k1 in keys + keys[::-1]:
- if k1 not in joins: continue
- s1 = joins[k1]
- for k2 in keys + keys[::-1]:
- if k2 >= k1: continue
- if k2 not in joins: continue
- s2 = joins[k2]
- if k2 in s1:
- s1.update(joins.pop(k2))
- updated += 1
- elif not s1.isdisjoint(s2):
- s1.update(joins.pop(k2))
- s1.update([k2])
- updated += 1
- tr = []
- for k in joins.keys():
- v = joins.pop(k)
- tr.append((k, np.array(list(v), dtype="int64")))
- return tr
-
def identify_contours(data_source, field, min_val, max_val,
cached_fields=None):
- cur_max_id = np.sum([g.ActiveDimensions.prod() for g in data_source._grids])
- pbar = get_pbar("First pass", len(data_source._grids))
- grids = sorted(data_source._grids, key=lambda g: -g.Level)
+ tree = amr_utils.ContourTree()
+ gct = amr_utils.TileContourTree(min_val, max_val)
total_contours = 0
- tree = []
- for gi,grid in enumerate(grids):
- pbar.update(gi+1)
- cm = data_source._get_cut_mask(grid)
- if cm is True: cm = np.ones(grid.ActiveDimensions, dtype='bool')
- old_field_parameters = grid.field_parameters
- grid.field_parameters = data_source.field_parameters
- local_ind = np.where( (grid[field] > min_val)
- & (grid[field] < max_val) & cm )
- grid.field_parameters = old_field_parameters
- if local_ind[0].size == 0: continue
- kk = np.arange(cur_max_id, cur_max_id-local_ind[0].size, -1)
- grid["tempContours"] = np.ones(grid.ActiveDimensions, dtype='int64') * -1
- grid["tempContours"][local_ind] = kk[:]
- cur_max_id -= local_ind[0].size
- xi_u,yi_u,zi_u = np.where(grid["tempContours"] > -1)
- cor_order = np.argsort(-1*grid["tempContours"][(xi_u,yi_u,zi_u)])
- fd_orig = grid["tempContours"].copy()
- xi = xi_u[cor_order]
- yi = yi_u[cor_order]
- zi = zi_u[cor_order]
- while data_point_utilities.FindContours(grid["tempContours"], xi, yi, zi) < 0:
- pass
- total_contours += np.unique(grid["tempContours"][grid["tempContours"] > -1]).size
- new_contours = np.unique(grid["tempContours"][grid["tempContours"] > -1]).tolist()
- tree += zip(new_contours, new_contours)
- tree = set(tree)
+ contours = {}
+ empty_mask = np.ones((1,1,1), dtype="uint8")
+ node_ids = []
+ for (g, node, (sl, dims, gi)) in data_source.tiles.slice_traverse():
+ node.node_ind = len(node_ids)
+ nid = node.node_id
+ node_ids.append(nid)
+ values = g[field][sl].astype("float64")
+ contour_ids = np.zeros(dims, "int64") - 1
+ gct.identify_contours(values, contour_ids, total_contours)
+ new_contours = tree.cull_candidates(contour_ids)
+ total_contours += new_contours.shape[0]
+ tree.add_contours(new_contours)
+ # Now we can create a partitioned grid with the contours.
+ pg = amr_utils.PartitionedGrid(g.id,
+ [contour_ids.view("float64")],
+ empty_mask, g.dds * gi, g.dds * (gi + dims),
+ dims.astype("int64"))
+ contours[nid] = (g.Level, node.node_ind, pg, sl)
+ node_ids = np.array(node_ids)
+ trunk = data_source.tiles.tree.trunk
+ mylog.info("Linking node (%s) contours.", len(contours))
+ amr_utils.link_node_contours(trunk, contours, tree, node_ids)
+ mylog.info("Linked.")
+ #joins = tree.cull_joins(bt)
+ #tree.add_joins(joins)
+ joins = tree.export()
+ contour_ids = defaultdict(list)
+ pbar = get_pbar("Updating joins ... ", len(contours))
+ final_joins = np.unique(joins[:,1])
+ for i, nid in enumerate(sorted(contours)):
+ level, node_ind, pg, sl = contours[nid]
+ ff = pg.my_data[0].view("int64")
+ amr_utils.update_joins(joins, ff, final_joins)
+ contour_ids[pg.parent_grid_id].append((sl, ff))
+ pbar.update(i)
pbar.finish()
- pbar = get_pbar("Calculating joins ", len(data_source._grids))
- grid_set = set()
- for gi,grid in enumerate(grids):
- pbar.update(gi)
- cg = grid.retrieve_ghost_zones(1, "tempContours", smoothed=False)
- grid_set.update(set(cg._grids))
- fd = cg["tempContours"].astype('int64')
- boundary_tree = amr_utils.construct_boundary_relationships(fd)
- tree.update(((a, b) for a, b in boundary_tree))
- pbar.finish()
- sort_new = np.array(list(tree), dtype='int64')
- mylog.info("Coalescing %s joins", sort_new.shape[0])
- joins = coalesce_join_tree(sort_new)
- #joins = [(i, np.array(list(j), dtype="int64")) for i, j in sorted(joins.items())]
- pbar = get_pbar("Joining ", len(joins))
- # This process could and should be done faster
- print "Joining..."
- t1 = time.time()
- ff = data_source["tempContours"].astype("int64")
- amr_utils.update_joins(joins, ff)
- data_source["tempContours"] = ff.astype("float64")
- #for i, new in enumerate(sorted(joins.keys())):
- # pbar.update(i)
- # old_set = joins[new]
- # for old in old_set:
- # if old == new: continue
- # i1 = (data_source["tempContours"] == old)
- # data_source["tempContours"][i1] = new
- t2 = time.time()
- print "Finished joining in %0.2e seconds" % (t2-t1)
- pbar.finish()
- data_source._flush_data_to_grids("tempContours", -1, dtype='int64')
- del data_source.field_data["tempContours"] # Force a reload from the grids
- data_source.get_data("tempContours")
- contour_ind = {}
- i = 0
- for contour_id in np.unique(data_source["tempContours"]):
- if contour_id == -1: continue
- contour_ind[i] = np.where(data_source["tempContours"] == contour_id)
- mylog.debug("Contour id %s has %s cells", i, contour_ind[i][0].size)
- i += 1
- mylog.info("Identified %s contours between %0.5e and %0.5e",
- len(contour_ind.keys()),min_val,max_val)
- for grid in chain(grid_set):
- grid.field_data.pop("tempContours", None)
- del data_source.field_data["tempContours"]
- return contour_ind
+ rv = dict()
+ rv.update(contour_ids)
+ # NOTE: Because joins can appear in both a "final join" and a subsequent
+ # "join", we can't know for sure how many unique joins there are without
+ # checking if no cells match or doing an expensive operation checking for
+ # the unique set of final join values.
+ return final_joins.size, rv
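Note the changed contract: identify_contours now returns the number of contours plus the per-grid (slice, values) payload instead of flat cell indices, and callers build one cut region per contour id, as the clump_handling hunk above does. Schematically:

    # Hypothetical caller, mirroring clump_handling.py above:
    nj, cids = identify_contours(data_source, field, min_val, max_val)
    for cid in range(nj):
        region = data_source.cut_region(
            ["obj['Contours'] == %s" % (cid + 1)],
            {'contour_slices': cids})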
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/photon_simulator/photon_simulator.py
--- a/yt/analysis_modules/photon_simulator/photon_simulator.py
+++ b/yt/analysis_modules/photon_simulator/photon_simulator.py
@@ -250,6 +250,7 @@
hubble = getattr(pf, "hubble_constant", None)
omega_m = getattr(pf, "omega_matter", None)
omega_l = getattr(pf, "omega_lambda", None)
+ if hubble == 0: hubble = None
if hubble is not None and \
omega_m is not None and \
omega_l is not None:
@@ -948,9 +949,9 @@
col1 = pyfits.Column(name='ENERGY', format='E',
array=self["eobs"])
col2 = pyfits.Column(name='DEC', format='D',
+ array=self["ysky"])
+ col3 = pyfits.Column(name='RA', format='D',
array=self["xsky"])
- col3 = pyfits.Column(name='RA', format='D',
- array=self["ysky"])
coldefs = pyfits.ColDefs([col1, col2, col3])
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
--- a/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
+++ b/yt/analysis_modules/spectral_integrator/spectral_frequency_integrator.py
@@ -19,7 +19,10 @@
import numpy as np
import os
-from yt.funcs import *
+from yt.funcs import \
+ download_file, \
+ mylog, \
+ only_on_root
from yt.data_objects.field_info_container import add_field
from yt.utilities.exceptions import YTException
@@ -31,6 +34,23 @@
xray_data_version = 1
+def _get_data_file():
+ data_file = "xray_emissivity.h5"
+ data_url = "http://yt-project.org/data"
+ if "YT_DEST" in os.environ and \
+ os.path.isdir(os.path.join(os.environ["YT_DEST"], "data")):
+ data_dir = os.path.join(os.environ["YT_DEST"], "data")
+ else:
+ data_dir = "."
+ data_path = os.path.join(data_dir, data_file)
+ if not os.path.exists(data_path):
+ mylog.info("Attempting to download supplementary data from %s to %s." %
+ (data_url, data_dir))
+ fn = download_file(os.path.join(data_url, data_file), data_path)
+ if fn != data_path:
+ raise RuntimeError, "Failed to download supplementary data."
+ return data_path
+
class EnergyBoundsException(YTException):
def __init__(self, lower, upper):
self.lower = lower
@@ -65,8 +85,7 @@
default_filename = False
if filename is None:
- filename = os.path.join(os.environ["YT_DEST"],
- "data", "xray_emissivity.h5")
+ filename = _get_data_file()
default_filename = True
if not os.path.exists(filename):
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/analysis_modules/sunyaev_zeldovich/projection.py
--- a/yt/analysis_modules/sunyaev_zeldovich/projection.py
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -19,11 +19,11 @@
#-----------------------------------------------------------------------------
from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.utilities.fits_image import FITSImageBuffer
from yt.data_objects.image_array import ImageArray
from yt.data_objects.field_info_container import add_field
from yt.funcs import fix_axis, mylog, iterable, get_pbar
from yt.utilities.definitions import inv_axis_names
-from yt.visualization.image_writer import write_fits, write_projection
from yt.visualization.volume_rendering.camera import off_axis_projection
from yt.utilities.parallel_tools.parallel_analysis_interface import \
communication_system, parallel_root_only
@@ -272,32 +272,52 @@
self.data["TeSZ"] = ImageArray(Te)
@parallel_root_only
- def write_fits(self, filename, clobber=True):
+ def write_fits(self, filename, sky_center=None, sky_scale=None, clobber=True):
r""" Export images to a FITS file. Writes the SZ distortion in all
specified frequencies as well as the mass-weighted temperature and the
- optical depth. Distance units are in kpc.
+ optical depth. Distance units are in kpc, unless *sky_center*
+ and *scale* are specified.
Parameters
----------
filename : string
The name of the FITS file to be written.
+ sky_center : tuple of floats, optional
+ The center of the observation in (RA, Dec) in degrees. Only used if
+ converting to sky coordinates.
+ sky_scale : float, optional
+ Scale between degrees and kpc. Only used if
+ converting to sky coordinates.
clobber : boolean, optional
If the file already exists, do we overwrite?
Examples
--------
+ >>> # This example just writes out a FITS file with kpc coords
>>> szprj.write_fits("SZbullet.fits", clobber=False)
+ >>> # This example uses sky coords
+ >>> sky_scale = 1./3600. # One arcsec per kpc
+ >>> sky_center = (30., 45.) # In degrees
+ >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
"""
- coords = {}
- coords["dx"] = self.dx*self.pf.units["kpc"]
- coords["dy"] = self.dy*self.pf.units["kpc"]
- coords["xctr"] = 0.0
- coords["yctr"] = 0.0
- coords["units"] = "kpc"
- other_keys = {"Time" : self.pf.current_time}
- write_fits(self.data, filename, clobber=clobber, coords=coords,
- other_keys=other_keys)
+ deltas = np.array([self.dx*self.pf.units["kpc"],
+ self.dy*self.pf.units["kpc"]])
+
+ if sky_center is None:
+ center = [0.0]*2
+ units = "kpc"
+ else:
+ center = sky_center
+ units = "deg"
+ deltas *= sky_scale
+
+ fib = FITSImageBuffer(self.data, fields=self.data.keys(),
+ center=center, units=units,
+ scale=deltas)
+ fib.update_all_headers("Time", self.pf.current_time)
+ fib.writeto(filename, clobber=clobber)
+
@parallel_root_only
def write_png(self, filename_prefix, cmap_name="algae",
log_fields=None):
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/convenience.py
--- a/yt/convenience.py
+++ b/yt/convenience.py
@@ -53,6 +53,8 @@
if isinstance(arg, types.StringTypes):
if os.path.exists(arg):
valid_file.append(True)
+ elif arg.startswith("http"):
+ valid_file.append(True)
else:
if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)):
valid_file.append(True)
@@ -128,6 +130,20 @@
if simulation_type not in simulation_time_series_registry:
raise YTSimulationNotIdentified(simulation_type)
+ if os.path.exists(parameter_filename):
+ valid_file = True
+ elif os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"),
+ parameter_filename)):
+ parameter_filename = os.path.join(ytcfg.get("yt", "test_data_dir"),
+ parameter_filename)
+ valid_file = True
+ else:
+ valid_file = False
+
+ if not valid_file:
+ raise YTOutputNotIdentified((parameter_filename, simulation_type),
+ dict(find_outputs=find_outputs))
+
return simulation_time_series_registry[simulation_type](parameter_filename,
find_outputs=find_outputs)
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -27,11 +27,12 @@
particle_handler_registry
from profiles import \
- EmptyProfileData, \
+ YTEmptyProfileData, \
BinnedProfile, \
BinnedProfile1D, \
BinnedProfile2D, \
- BinnedProfile3D
+ BinnedProfile3D, \
+ create_profile
from time_series import \
TimeSeriesData, \
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -167,12 +167,12 @@
Parameters
----------
- axis : int
- The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
field : string
This is the field which will be "projected" along the axis. If
multiple are specified (in a list) they will all be projected in
the first pass.
+ axis : int
+ The axis along which to slice. Can be 0, 1, or 2 for x, y, z.
weight_field : string
If supplied, the field being projected will be multiplied by this
weight value before being integrated, and at the conclusion of the
@@ -274,11 +274,12 @@
for chunk in self.data_source.chunks([], "io"):
self._initialize_chunk(chunk, tree)
# This needs to be parallel_objects-ified
- for chunk in parallel_objects(self.data_source.chunks(
- chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
- get_memory_usage()/1024.)
- self._handle_chunk(chunk, fields, tree)
+ with self.data_source._field_parameter_state(self.field_parameters):
+ for chunk in parallel_objects(self.data_source.chunks(
+ chunk_fields, "io")):
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
+ self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
merge_style = -1
@@ -308,6 +309,7 @@
nvals *= convs[None,:]
# We now convert to half-widths and center-points
data = {}
+ #non_nan = ~np.any(np.isnan(nvals), axis=-1)
data['px'] = px
data['py'] = py
data['weight_field'] = nwvals
@@ -319,8 +321,9 @@
field_data = np.hsplit(data.pop('fields'), len(fields))
for fi, field in enumerate(fields):
mylog.debug("Setting field %s", field)
- self[field] = field_data[fi].ravel()
- for i in data.keys(): self[i] = data.pop(i)
+ self[field] = field_data[fi].ravel()#[non_nan]
+ for i in data.keys():
+ self[i] = data.pop(i)#[non_nan]
mylog.info("Projection completed")
def _initialize_chunk(self, chunk, tree):
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -33,6 +33,8 @@
ParallelAnalysisInterface
from yt.utilities.parameter_file_storage import \
ParameterFileStore
+from yt.utilities.amr_kdtree.api import \
+ AMRKDTree
from .derived_quantities import DerivedQuantityCollection
from .field_info_container import \
NeedsGridType, ValidateSpatial
@@ -351,6 +353,24 @@
else:
self.hierarchy.save_object(self, name)
+ def to_glue(self, fields, label="yt"):
+ """
+ Takes specific *fields* in the container and exports them to
+ Glue (http://www.glueviz.org) for interactive
+ analysis. Optionally add a *label*.
+ """
+ from glue.core import DataCollection, Data
+ from glue.core.coordinates import coordinates_from_header
+ from glue.qt.glue_application import GlueApplication
+
+ gdata = Data(label=label)
+ for component_name in fields:
+ gdata.add_component(self[component_name], component_name)
+ dc = DataCollection([gdata])
+
+ app = GlueApplication(dc)
+ app.start()
+
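to_glue packs the requested fields into a Glue Data object and launches the Glue UI. A hedged usage sketch (requires the glue package to be installed; the dataset name is illustrative):

    >>> pf = load("DD0010/moving7_0010")
    >>> dd = pf.h.all_data()
    >>> dd.to_glue(["Density", "Temperature"], label="moving7")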
def __reduce__(self):
args = tuple([self.pf._hash(), self._type_name] +
[getattr(self, n) for n in self._con_args] +
@@ -365,6 +385,13 @@
return s
@contextmanager
+ def _field_parameter_state(self, field_parameters):
+ old_field_parameters = self.field_parameters
+ self.field_parameters = field_parameters
+ yield
+ self.field_parameters = old_field_parameters
+
+ @contextmanager
def _field_type_state(self, ftype, finfo, obj = None):
if obj is None: obj = self
old_particle_type = obj._current_particle_type
@@ -407,6 +434,14 @@
explicit_fields.append((ftype, fname))
return explicit_fields
+ _tree = None
+
+ @property
+ def tiles(self):
+ if self._tree is not None: return self._tree
+ self._tree = AMRKDTree(self.pf, data_source=self)
+ return self._tree
+
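The tiles property lazily builds, and then caches, an AMRKDTree over the data source; the new contour finder consumes it through slice_traverse(). A hedged usage sketch of that traversal, following the unpacking used in contour_finder.py above:

    >>> dd = pf.h.all_data()
    >>> for g, node, (sl, dims, gi) in dd.tiles.slice_traverse():
    ...     values = g["Density"][sl]  # the non-overlapping tile of g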
@property
def blocks(self):
for io_chunk in self.chunks([], "io"):
@@ -751,11 +786,13 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- def cut_region(self, field_cuts):
+ def cut_region(self, field_cuts, field_parameters = None):
"""
- Return an InLineExtractedRegion, where the grid cells are cut on the
- fly with a set of field_cuts. It is very useful for applying
- conditions to the fields in your data object.
+ Return a YTCutRegionBase, where the object cells are cut on the
+ fly with a set of field_cuts. It is very useful for applying
+ conditions to the fields in your data object. Note that in previous
+ versions of yt, this accepted 'grid' as the variable name, but it
+ now requires 'obj'.
Examples
--------
@@ -763,19 +800,12 @@
>>> pf = load("RedshiftOutput0005")
>>> ad = pf.h.all_data()
- >>> cr = ad.cut_region(["grid['Temperature'] > 1e6"])
+ >>> cr = ad.cut_region(["obj['Temperature'] > 1e6"])
>>> print cr.quantities["TotalQuantity"]("CellMassMsun")
-
"""
- return YTValueCutExtractionBase(self, field_cuts)
-
- def extract_region(self, indices):
- """
- Return an ExtractedRegion where the points contained in it are defined
- as the points in `this` data object with the given *indices*.
- """
- fp = self.field_parameters.copy()
- return YTSelectedIndicesBase(self, indices, field_parameters = fp)
+ cr = self.pf.h.cut_region(self, field_cuts,
+ field_parameters = field_parameters)
+ return cr
def extract_isocontours(self, field, value, filename = None,
rescale = False, sample_values = None):
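Since conditionals are now written against 'obj', cut regions compose by chaining; a brief sketch with a hypothetical dataset:

pf = load("RedshiftOutput0005")  # hypothetical dataset
ad = pf.h.all_data()
hot = ad.cut_region(["obj['Temperature'] > 1e6"])
# A cut region can itself be cut again; filters stack
hot_dense = hot.cut_region(["obj['Density'] > 1e-28"])
print hot_dense.quantities["TotalQuantity"]("CellMassMsun")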
@@ -966,12 +996,15 @@
ff, mask, grid.LeftEdge, grid.dds)
def extract_connected_sets(self, field, num_levels, min_val, max_val,
- log_space=True, cumulative=True, cache=False):
+ log_space=True, cumulative=True):
"""
This function will create a set of contour objects, defined
by having connected cell structures, which can then be
studied and used to 'paint' their source grids, thus enabling
them to be plotted.
+
+ Note that this function *can* return a connected set object that has no
+ member values.
"""
if log_space:
cons = np.logspace(np.log10(min_val),np.log10(max_val),
@@ -979,8 +1012,6 @@
else:
cons = np.linspace(min_val, max_val, num_levels+1)
contours = {}
- if cache: cached_fields = defaultdict(lambda: dict())
- else: cached_fields = None
for level in range(num_levels):
contours[level] = {}
if cumulative:
@@ -988,10 +1019,11 @@
else:
mv = cons[level+1]
from yt.analysis_modules.level_sets.api import identify_contours
- cids = identify_contours(self, field, cons[level], mv,
- cached_fields)
- for cid, cid_ind in cids.items():
- contours[level][cid] = self.extract_region(cid_ind)
+ nj, cids = identify_contours(self, field, cons[level], mv)
+ for cid in range(nj):
+ contours[level][cid] = self.cut_region(
+ ["obj['Contours'] == %s" % (cid + 1)],
+ {'contour_slices': cids})
return cons, contours
def paint_grids(self, field, value, default_value=None):
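With the rewrite, each connected set comes back as a cut region keyed on the Contours field; a usage sketch, with the dataset and bounds hypothetical:

pf = load("DD0046/DD0046")  # hypothetical dataset
ad = pf.h.all_data()
# Four logarithmically spaced levels between the two bounds
cons, contours = ad.extract_connected_sets("Density", 4, 1e-30, 1e-24)
for level in contours:
    for cid, region in contours[level].items():
        # Some sets may be empty, as noted in the docstring above
        print level, cid, region["CellMassMsun"].sum()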
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -60,21 +60,21 @@
def _gradx(f, data):
grad = data[field][sl,1:-1,1:-1] - data[field][sr,1:-1,1:-1]
- grad /= 2.0*data["dx"].flat[0]
+ grad /= 2.0*data["dx"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
def _grady(f, data):
grad = data[field][1:-1,sl,1:-1] - data[field][1:-1,sr,1:-1]
- grad /= 2.0*data["dy"].flat[0]
+ grad /= 2.0*data["dy"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
def _gradz(f, data):
grad = data[field][1:-1,1:-1,sl] - data[field][1:-1,1:-1,sr]
- grad /= 2.0*data["dz"].flat[0]
+ grad /= 2.0*data["dz"].flat[0]*data.pf.units["cm"]
g = np.zeros(data[field].shape, dtype='float64')
g[1:-1,1:-1,1:-1] = grad
return g
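The substance of this hunk is a unit fix: the central-difference denominator now carries the code-length-to-cm conversion. A standalone numpy sketch of the stencil, assuming uniform grid spacing:

import numpy as np

def gradient_x(f, dx_cm):
    # Second-order central difference, (f[i+1] - f[i-1]) / (2 dx),
    # with dx in centimeters so the result is per-cm.
    g = np.zeros_like(f)
    g[1:-1, :, :] = (f[2:, :, :] - f[:-2, :, :]) / (2.0 * dx_cm)
    return g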
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -112,6 +112,10 @@
_domain_ind = None
+ def mask_refinement(self, selector):
+ mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
+ return mask
+
def select_blocks(self, selector):
mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
mask = self._reshape_vals(mask)
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -20,16 +20,10 @@
from yt.data_objects.data_containers import YTFieldData
from yt.utilities.lib import bin_profile1d, bin_profile2d, bin_profile3d
+from yt.utilities.lib import new_bin_profile1d, new_bin_profile2d, \
+ new_bin_profile3d
from yt.utilities.parallel_tools.parallel_analysis_interface import \
- ParallelAnalysisInterface
-
-_field_mapping = {
- "total_mass": ("CellMassMsun", "ParticleMassMsun"),
- "hybrid_radius": ("RadiusCode", "ParticleRadiusCode"),
- }
-
-class EmptyProfileData(Exception):
- pass
+ ParallelAnalysisInterface, parallel_objects
def preserve_source_parameters(func):
def save_state(*args, **kwargs):
@@ -55,7 +49,6 @@
self._data_source = data_source
self.pf = data_source.pf
self.field_data = YTFieldData()
- self._pdata = {}
@property
def hierarchy(self):
@@ -131,14 +124,11 @@
def __setitem__(self, key, value):
self.field_data[key] = value
- def _get_field(self, source, this_field, check_cut):
+ def _get_field(self, source, field, check_cut):
# This is where we will iterate to get all contributions to a field
# which is how we will implement hybrid particle/cell fields
# but... we default to just the field.
- data = []
- for field in _field_mapping.get(this_field, (this_field,)):
- data.append(source[field].astype('float64'))
- return np.concatenate(data, axis=0)
+ return source[field].astype('float64')
def _fix_pickle(self):
if isinstance(self._data_source, tuple):
@@ -225,7 +215,7 @@
def _get_bins(self, source, check_cut=False):
source_data = self._get_field(source, self.bin_field, check_cut)
if source_data.size == 0: # Nothing for us here.
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
# Truncate at boundaries.
if self.end_collect:
mi = np.ones_like(source_data).astype('bool')
@@ -234,7 +224,7 @@
& (source_data < self._bins.max()))
sd = source_data[mi]
if sd.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
# Stick the bins into our fixed bins, set at initialization
bin_indices = np.digitize(sd, self._bins)
if self.end_collect: #limit the range of values to 0 and n_bins-1
@@ -400,7 +390,7 @@
source_data_x = self._get_field(source, self.x_bin_field, check_cut)
source_data_y = self._get_field(source, self.y_bin_field, check_cut)
if source_data_x.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
if self.end_collect:
mi = np.arange(source_data_x.size)
@@ -412,7 +402,7 @@
sd_x = source_data_x[mi]
sd_y = source_data_y[mi]
if sd_x.size == 0 or sd_y.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -518,48 +508,6 @@
return np.log10(upper), np.log10(lower)
return upper, lower
-class BinnedProfile2DInlineCut(BinnedProfile2D):
- def __init__(self, data_source,
- x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
- y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
- end_collect=False):
- self.indices = data_source["Ones"].astype("bool")
- BinnedProfile2D.__init__(self, data_source,
- x_n_bins, x_bin_field, x_lower_bound, x_upper_bound, x_log,
- y_n_bins, y_bin_field, y_lower_bound, y_upper_bound, y_log,
- end_collect)
-
- @preserve_source_parameters
- def _bin_field(self, source, field, weight, accumulation,
- args, check_cut=False):
- source_data = self._get_field(source, field, check_cut)
- if weight: weight_data = self._get_field(source, weight, check_cut)
- else: weight_data = np.ones(source_data.shape, dtype='float64')
- self.total_stuff = source_data.sum()
- binned_field = self._get_empty_field()
- weight_field = self._get_empty_field()
- m_field = self._get_empty_field()
- q_field = self._get_empty_field()
- used_field = self._get_empty_field()
- mi = args[0]
- bin_indices_x = args[1][self.indices].ravel().astype('int64')
- bin_indices_y = args[2][self.indices].ravel().astype('int64')
- source_data = source_data[mi][self.indices]
- weight_data = weight_data[mi][self.indices]
- nx = bin_indices_x.size
- #mylog.debug("Binning %s / %s times", source_data.size, nx)
- bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
- weight_field, binned_field, m_field, q_field, used_field)
- if accumulation: # Fix for laziness
- if not iterable(accumulation):
- raise SyntaxError("Accumulation needs to have length 2")
- if accumulation[0]:
- binned_field = np.add.accumulate(binned_field, axis=0)
- if accumulation[1]:
- binned_field = np.add.accumulate(binned_field, axis=1)
- return binned_field, weight_field, used_field.astype('bool')
-
-
class BinnedProfile3D(BinnedProfile):
"""
A 'Profile' produces either a weighted (or unweighted) average
@@ -658,7 +606,7 @@
source_data_y = self._get_field(source, self.y_bin_field, check_cut)
source_data_z = self._get_field(source, self.z_bin_field, check_cut)
if source_data_x.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
if self.end_collect:
mi = np.arange(source_data_x.size)
else:
@@ -672,7 +620,7 @@
sd_y = source_data_y[mi]
sd_z = source_data_z[mi]
if sd_x.size == 0 or sd_y.size == 0 or sd_z.size == 0:
- raise EmptyProfileData()
+ raise YTEmptyProfileData()
bin_indices_x = np.digitize(sd_x, self._x_bins) - 1
bin_indices_y = np.digitize(sd_y, self._y_bins) - 1
@@ -790,25 +738,280 @@
self._data_source.hierarchy.save_data(values, "/Profiles", name,
set_attr, force=force)
-class StoredBinnedProfile3D(BinnedProfile3D):
- def __init__(self, pf, name):
- """
- Given a *pf* parameterfile and the *name* of a stored profile, retrieve
- it into a read-only data structure.
- """
+class ProfileFieldAccumulator(object):
+ def __init__(self, n_fields, size):
+ shape = size + (n_fields,)
+ self.values = np.zeros(shape, dtype="float64")
+ self.mvalues = np.zeros(shape, dtype="float64")
+ self.qvalues = np.zeros(shape, dtype="float64")
+ self.used = np.zeros(size, dtype='bool')
+ self.weight_values = np.zeros(size, dtype="float64")
+
+class ProfileND(ParallelAnalysisInterface):
+ def __init__(self, data_source, weight_field = None):
+ self.data_source = data_source
+ self.pf = data_source.pf
self.field_data = YTFieldData()
- prof_arr = pf.h.get_data("/Profiles", name)
- if prof_arr is None: raise KeyError("No such array")
- for ax in 'xyz':
- for base in ['%s_bin_field', '_%s_log']:
- setattr(self, base % ax, prof_arr.getAttr(base % ax))
- for ax in 'xyz':
- fn = getattr(self, '%s_bin_field' % ax)
- self.field_data[fn] = prof_arr.getAttr('%s_bin_values' % ax)
- shape = prof_arr.getAttr('shape')
- for fn, fd in zip(prof_arr.getAttr('field_order'),
- prof_arr.read().transpose()):
- self.field_data[fn] = fd.reshape(shape)
+ self.weight_field = weight_field
- def add_fields(self, *args, **kwargs):
- raise RuntimeError("Sorry, you can't add to a stored profile.")
+ def add_fields(self, fields):
+ fields = ensure_list(fields)
+ temp_storage = ProfileFieldAccumulator(len(fields), self.size)
+ for g in parallel_objects(self.data_source._grids):
+ self._bin_grid(g, fields, temp_storage)
+ self._finalize_storage(fields, temp_storage)
+
+ def _finalize_storage(self, fields, temp_storage):
+ # We use our main comm here
+ # This also will fill _field_data
+ # FIXME: Add parallelism and combining std stuff
+ if self.weight_field is not None:
+ temp_storage.values /= temp_storage.weight_values[...,None]
+ blank = ~temp_storage.used
+ for i, field in enumerate(fields):
+ self.field_data[field] = temp_storage.values[...,i]
+ self.field_data[field][blank] = 0.0
+
+ def _bin_grid(self, grid, fields, storage):
+ raise NotImplementedError
+
+ def _filter(self, bin_fields, cut_points):
+ # cut_points is initially just the points inside our region
+ # we also want to apply a filtering based on min/max
+ filter = np.zeros(bin_fields[0].shape, dtype='bool')
+ filter[cut_points] = True
+ for (mi, ma), data in zip(self.bounds, bin_fields):
+ filter &= (data > mi)
+ filter &= (data < ma)
+ return filter, [data[filter] for data in bin_fields]
+
+ def _get_data(self, grid, fields):
+ # Save the values in the grid beforehand.
+ old_params = grid.field_parameters
+ old_keys = grid.field_data.keys()
+ grid.field_parameters = self.data_source.field_parameters
+ # Now we ask our source which values to include
+ pointI = self.data_source._get_point_indices(grid)
+ bin_fields = [grid[bf] for bf in self.bin_fields]
+ # We want to make sure that our fields are within the bounds of the
+ # binning
+ filter, bin_fields = self._filter(bin_fields, pointI)
+ if not np.any(filter): return None
+ arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
+ for i, field in enumerate(fields):
+ arr[:,i] = grid[field][filter]
+ if self.weight_field is not None:
+ weight_data = grid[self.weight_field]
+ else:
+ weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+ weight_data = weight_data[filter]
+ # Restore the grid's previous state before handing the arrays back
+ grid.field_parameters = old_params
+ grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
+ return arr, weight_data, bin_fields
+
+ def __getitem__(self, key):
+ return self.field_data[key]
+
+ def __iter__(self):
+ # __iter__ must return an iterator, not the sorted list itself
+ return iter(sorted(self.field_data.items()))
+
+ def _get_bins(self, mi, ma, n, take_log):
+ if take_log:
+ return np.logspace(np.log10(mi), np.log10(ma), n+1)
+ else:
+ return np.linspace(mi, ma, n+1)
+
+class Profile1D(ProfileND):
+ def __init__(self, data_source, x_field, x_n, x_min, x_max, x_log,
+ weight_field = None):
+ super(Profile1D, self).__init__(data_source, weight_field)
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+
+ self.size = (self.x_bins.size - 1,)
+ self.bin_fields = (self.x_field,)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
+ self.x = self.x_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ gd = self._get_data(grid, fields)
+ if gd is None: return
+ fdata, wdata, (bf_x,) = gd
+ bin_ind = np.digitize(bf_x, self.x_bins) - 1
+ new_bin_profile1d(bin_ind, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+class Profile2D(ProfileND):
+ def __init__(self, data_source,
+ x_field, x_n, x_min, x_max, x_log,
+ y_field, y_n, y_min, y_max, y_log,
+ weight_field = None):
+ super(Profile2D, self).__init__(data_source, weight_field)
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+ self.y_field = y_field
+ self.y_log = y_log
+ self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+
+ self.size = (self.x_bins.size - 1, self.y_bins.size - 1)
+
+ self.bin_fields = (self.x_field, self.y_field)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+ (self.y_bins[0], self.y_bins[-1]))
+ self.x = self.x_bins
+ self.y = self.y_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ rv = self._get_data(grid, fields)
+ if rv is None: return
+ fdata, wdata, (bf_x, bf_y) = rv
+ bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+ bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+ new_bin_profile2d(bin_ind_x, bin_ind_y, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+class Profile3D(ProfileND):
+ def __init__(self, data_source,
+ x_field, x_n, x_min, x_max, x_log,
+ y_field, y_n, y_min, y_max, y_log,
+ z_field, z_n, z_min, z_max, z_log,
+ weight_field = None):
+ super(Profile3D, self).__init__(data_source, weight_field)
+ # X
+ self.x_field = x_field
+ self.x_log = x_log
+ self.x_bins = self._get_bins(x_min, x_max, x_n, x_log)
+ # Y
+ self.y_field = y_field
+ self.y_log = y_log
+ self.y_bins = self._get_bins(y_min, y_max, y_n, y_log)
+ # Z
+ self.z_field = z_field
+ self.z_log = z_log
+ self.z_bins = self._get_bins(z_min, z_max, z_n, z_log)
+
+ self.size = (self.x_bins.size - 1,
+ self.y_bins.size - 1,
+ self.z_bins.size - 1)
+
+ self.bin_fields = (self.x_field, self.y_field, self.z_field)
+ self.bounds = ((self.x_bins[0], self.x_bins[-1]),
+ (self.y_bins[0], self.y_bins[-1]),
+ (self.z_bins[0], self.z_bins[-1]))
+
+ self.x = self.x_bins
+ self.y = self.y_bins
+ self.z = self.z_bins
+
+ def _bin_grid(self, grid, fields, storage):
+ rv = self._get_data(grid, fields)
+ if rv is None: return
+ fdata, wdata, (bf_x, bf_y, bf_z) = rv
+ bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
+ bin_ind_y = np.digitize(bf_y, self.y_bins) - 1
+ bin_ind_z = np.digitize(bf_z, self.z_bins) - 1
+ new_bin_profile3d(bin_ind_x, bin_ind_y, bin_ind_z, wdata, fdata,
+ storage.weight_values, storage.values,
+ storage.mvalues, storage.qvalues,
+ storage.used)
+ # We've binned it!
+
+def create_profile(data_source, bin_fields, n = 64,
+ weight_field = "CellMass", fields = None,
+ accumulation = False, fractional = False):
+ r"""
+ Create a 1, 2, or 3D profile object.
+
+ The dimensionality of the profile object is chosen by the number of
+ fields given in the bin_fields argument.
+
+ Parameters
+ ----------
+ data_source : AMR3DData Object
+ The data object to be profiled.
+ bin_fields : list of strings
+ List of the binning fields for profiling.
+ n : int or list of ints
+ The number of bins in each dimension. If None, 64 bins are
+ used for each bin field.
+ Default: 64.
+ weight_field : str
+ The weight field for computing weighted average for the profile
+ values. If None, the profile values are sums of the data in
+ each bin.
+ fields : list of strings
+ The fields to be profiled.
+ accumulation : bool or list of bools
+ If True, the profile values for a bin n are the cumulative sum of
+ all the values from bin 0 to n. If -True (any negative value), the sum is reversed so
+ that the value for bin n is the cumulative sum from bin N (total bins)
+ to n. If the profile is 2D or 3D, a list of values can be given to
+ control the summation in each dimension independently.
+ Default: False.
+ fractional : bool
+ If True, the profile values are divided by the sum of all
+ the profile data such that the profile represents a probability
+ distribution function.
+
+ Examples
+ --------
+
+ Create a 1d profile. Access bin field from profile.x and field
+ data from profile.field_data.
+
+ >>> pf = load("DD0046/DD0046")
+ >>> ad = pf.h.all_data()
+ >>> profile = create_profile(ad, ["Density"],
+ ... fields=["Temperature", "x-velocity"])
+ >>> print profile.x
+ >>> print profile.field_data["Temperature"]
+
+ """
+ if len(bin_fields) == 1:
+ cls = Profile1D
+ elif len(bin_fields) == 2:
+ cls = Profile2D
+ elif len(bin_fields) == 3:
+ cls = Profile3D
+ else:
+ raise NotImplementedError
+ if not iterable(n):
+ n = [n] * len(bin_fields)
+ if not iterable(accumulation):
+ accumulation = [accumulation] * len(bin_fields)
+ logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+ ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
+ for f, l in zip(bin_fields, logs)]
+ args = [data_source]
+ for f, n, (mi, ma), l in zip(bin_fields, n, ex, logs):
+ args += [f, n, mi, ma, l]
+ obj = cls(*args, weight_field = weight_field)
+ setattr(obj, "accumulation", accumulation)
+ setattr(obj, "fractional", fractional)
+ if fields is not None:
+ obj.add_fields(fields)
+ for field in fields:
+ if fractional:
+ obj.field_data[field] /= obj.field_data[field].sum()
+ for axis, acc in enumerate(accumulation):
+ if not acc: continue
+ temp = obj.field_data[field]
+ temp = np.rollaxis(temp, axis)
+ if acc < 0:
+ temp = temp[::-1]
+ temp = temp.cumsum(axis=0)
+ if acc < 0:
+ temp = temp[::-1]
+ temp = np.rollaxis(temp, axis)
+ obj.field_data[field] = temp
+
+ return obj
+
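Tying the new classes together, create_profile dispatches on the number of bin fields and then applies the fractional/accumulation post-processing; an end-to-end sketch with a hypothetical dataset:

pf = load("DD0046/DD0046")  # hypothetical dataset
ad = pf.h.all_data()
profile = create_profile(ad, ["Density", "Temperature"], n=32,
                         fields=["CellMassMsun"],
                         weight_field=None,
                         accumulation=[True, False])
# x/y hold the bin edges; field_data holds the binned values
print profile.x.shape, profile.y.shape
print profile.field_data["CellMassMsun"].shape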
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -583,3 +583,85 @@
self.set_field_parameter('e0', e0)
self.set_field_parameter('e1', e1)
self.set_field_parameter('e2', e2)
+
+class YTCutRegionBase(YTSelectionContainer3D):
+ """
+ This is a data object designed to allow individuals to apply logical
+ operations to fields or particles and filter as a result of those cuts.
+
+ Parameters
+ ----------
+ base_object : YTSelectionContainer3D
+ The object to which cuts will be applied.
+ conditionals : list of strings
+ A list of conditionals that will be evaluated. In the namespace
+ available, these conditionals will have access to 'obj' which is a data
+ object of unknown shape, and they must generate a boolean array. For
+ instance, conditionals = ["obj['temperature'] < 1e3"]
+
+ Examples
+ --------
+
+ >>> pf = load("DD0010/moving7_0010")
+ >>> sp = pf.h.sphere("max", (1.0, 'mpc'))
+ >>> cr = pf.h.cut_region(sp, ["obj['temperature'] < 1e3"])
+ """
+ _type_name = "cut_region"
+ _con_args = ("base_object", "conditionals")
+ def __init__(self, base_object, conditionals, pf = None,
+ field_parameters = None):
+ super(YTCutRegionBase, self).__init__(base_object.center, pf, field_parameters)
+ self.conditionals = ensure_list(conditionals)
+ self.base_object = base_object
+ self._selector = None
+ # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth,
+ # ires and get_data
+
+ @property
+ def selector(self):
+ raise NotImplementedError
+
+ def chunks(self, fields, chunking_style, **kwargs):
+ # We actually want to chunk the sub-chunk, not ourselves. We have no
+ # chunks to speak of, as we do not do data IO.
+ for chunk in self.hierarchy._chunk(self.base_object,
+ chunking_style,
+ **kwargs):
+ with self.base_object._chunked_read(chunk):
+ self.get_data(fields)
+ yield self
+
+ def get_data(self, fields = None):
+ fields = ensure_list(fields)
+ self.base_object.get_data(fields)
+ ind = self._cond_ind
+ for field in fields:
+ self.field_data[field] = self.base_object[field][ind]
+
+ @property
+ def _cond_ind(self):
+ ind = None
+ obj = self.base_object
+ with obj._field_parameter_state(self.field_parameters):
+ for cond in self.conditionals:
+ res = eval(cond)
+ if ind is None: ind = res
+ np.logical_and(res, ind, ind)
+ return ind
+
+ @property
+ def icoords(self):
+ return self.base_object.icoords[self._cond_ind,:]
+
+ @property
+ def fcoords(self):
+ return self.base_object.fcoords[self._cond_ind,:]
+
+ @property
+ def ires(self):
+ return self.base_object.ires[self._cond_ind]
+
+ @property
+ def fwidth(self):
+ return self.base_object.fwidth[self._cond_ind,:]
+
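The conditionals are evaluated with eval in a scope where obj is the base object, and the boolean results are AND-ed in place; a simplified standalone model of _cond_ind (note the original also ANDs the first result with itself, which is harmless):

import numpy as np

def combined_mask(obj, conditionals):
    # Each string must evaluate to a boolean array over obj's cells.
    ind = None
    for cond in conditionals:
        res = eval(cond)  # e.g. "obj['Temperature'] < 1e3"
        if ind is None:
            ind = res
        else:
            np.logical_and(res, ind, ind)  # accumulate in place
    return ind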
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -79,7 +79,7 @@
obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
- if not os.path.exists(apath): raise IOError(filename)
+ #if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
if obj._skip_cache is False:
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/tests/test_extract_regions.py
--- a/yt/data_objects/tests/test_extract_regions.py
+++ b/yt/data_objects/tests/test_extract_regions.py
@@ -6,15 +6,14 @@
def test_cut_region():
# We decompose in different ways
- return #TESTDISABLED
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs,
fields = ("Density", "Temperature", "x-velocity"))
# We'll test two objects
dd = pf.h.all_data()
- r = dd.cut_region( [ "grid['Temperature'] > 0.5",
- "grid['Density'] < 0.75",
- "grid['x-velocity'] > 0.25" ])
+ r = dd.cut_region( [ "obj['Temperature'] > 0.5",
+ "obj['Density'] < 0.75",
+ "obj['x-velocity'] > 0.25" ])
t = ( (dd["Temperature"] > 0.5 )
& (dd["Density"] < 0.75 )
& (dd["x-velocity"] > 0.25 ) )
@@ -23,33 +22,21 @@
yield assert_equal, np.all(r["x-velocity"] > 0.25), True
yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
+ r2 = r.cut_region( [ "obj['Temperature'] < 0.75" ] )
t2 = (r["Temperature"] < 0.75)
yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
yield assert_equal, np.all(r2["Temperature"] < 0.75), True
-def test_extract_region():
- # We decompose in different ways
- return #TESTDISABLED
- for nprocs in [1, 2, 4, 8]:
- pf = fake_random_pf(64, nprocs = nprocs,
- fields = ("Density", "Temperature", "x-velocity"))
- # We'll test two objects
+ # Now we can test some projections
dd = pf.h.all_data()
- t = ( (dd["Temperature"] > 0.5 )
- & (dd["Density"] < 0.75 )
- & (dd["x-velocity"] > 0.25 ) )
- r = dd.extract_region(t)
- yield assert_equal, np.all(r["Temperature"] > 0.5), True
- yield assert_equal, np.all(r["Density"] < 0.75), True
- yield assert_equal, np.all(r["x-velocity"] > 0.25), True
- yield assert_equal, np.sort(dd["Density"][t]), np.sort(r["Density"])
- yield assert_equal, np.sort(dd["x"][t]), np.sort(r["x"])
- t2 = (r["Temperature"] < 0.75)
- r2 = r.cut_region( [ "grid['Temperature'] < 0.75" ] )
- yield assert_equal, np.sort(r2["Temperature"]), np.sort(r["Temperature"][t2])
- yield assert_equal, np.all(r2["Temperature"] < 0.75), True
- t3 = (r["Temperature"] < 0.75)
- r3 = r.extract_region( t3 )
- yield assert_equal, np.sort(r3["Temperature"]), np.sort(r["Temperature"][t3])
- yield assert_equal, np.all(r3["Temperature"] < 0.75), True
+ cr = dd.cut_region(["obj['Ones'] > 0"])
+ for weight in [None, "Density"]:
+ p1 = pf.h.proj("Density", 0, data_source=dd, weight_field=weight)
+ p2 = pf.h.proj("Density", 0, data_source=cr, weight_field=weight)
+ for f in p1.field_data:
+ yield assert_almost_equal, p1[f], p2[f]
+ cr = dd.cut_region(["obj['Density'] > 0.25"])
+ p2 = pf.h.proj("Density", 2, data_source=cr)
+ yield assert_equal, p2["Density"].max() > 0.25, True
+ p2 = pf.h.proj("Density", 2, data_source=cr, weight_field = "Density")
+ yield assert_equal, p2["Density"].max() > 0.25, True
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -1,10 +1,11 @@
from yt.testing import *
from yt.data_objects.profiles import \
- BinnedProfile1D, BinnedProfile2D, BinnedProfile3D
+ BinnedProfile1D, BinnedProfile2D, BinnedProfile3D, \
+ Profile1D, Profile2D, Profile3D
_fields = ("Density", "Temperature", "Dinosaurs", "Tribbles")
-def test_profiles():
+def test_binned_profiles():
pf = fake_random_pf(64, nprocs = 8, fields = _fields)
nv = pf.domain_dimensions.prod()
dd = pf.h.all_data()
@@ -71,3 +72,83 @@
p3d.add_fields(["Ones"], weight="Temperature")
yield assert_equal, p3d["Ones"][:-1,:-1,:-1], np.ones((nb,nb,nb))
+def test_profiles():
+ pf = fake_random_pf(64, nprocs = 8, fields = _fields)
+ nv = pf.domain_dimensions.prod()
+ dd = pf.h.all_data()
+ (rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
+ ["Density", "Temperature", "Dinosaurs"])
+ rt, tt, dt = dd.quantities["TotalQuantity"](
+ ["Density", "Temperature", "Dinosaurs"])
+ # First we profile against the fields' own extrema, padded by 10%
+ e1, e2 = 0.9, 1.1
+ for nb in [8, 16, 32, 64]:
+ # We log all the fields or don't log 'em all. No need to do them
+ # individually.
+ for lf in [True, False]:
+ p1d = Profile1D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ weight_field = None)
+ p1d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p1d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p1d["Temperature"].sum(), 7
+
+ p2d = Profile2D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ "Temperature", nb, tmi*e1, tma*e2, lf,
+ weight_field = None)
+ p2d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p2d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p2d["Temperature"].sum(), 7
+
+ p3d = Profile3D(dd,
+ "Density", nb, rmi*e1, rma*e2, lf,
+ "Temperature", nb, tmi*e1, tma*e2, lf,
+ "Dinosaurs", nb, dmi*e1, dma*e2, lf,
+ weight_field = None)
+ p3d.add_fields(["Ones", "Temperature"])
+ yield assert_equal, p3d["Ones"].sum(), nv
+ yield assert_rel_equal, tt, p3d["Temperature"].sum(), 7
+
+ p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p1d.add_fields("Ones")
+ av = nv / nb
+ yield assert_equal, p1d["Ones"], np.ones(nb)*av
+
+ # We re-bin ones with a weight now
+ p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p1d.add_fields(["Ones"])
+ yield assert_equal, p1d["Ones"], np.ones(nb)
+
+ p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p2d.add_fields("Ones")
+ av = nv / nb**2
+ yield assert_equal, p2d["Ones"], np.ones((nb, nb))*av
+
+ # We re-bin ones with a weight now
+ p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p2d.add_fields(["Ones"])
+ yield assert_equal, p2d["Ones"], np.ones((nb, nb))
+
+ p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ "z", nb, 0.0, 1.0, False,
+ weight_field = None)
+ p3d.add_fields("Ones")
+ av = nv / nb**3
+ yield assert_equal, p3d["Ones"], np.ones((nb, nb, nb))*av
+
+ # We re-bin ones with a weight now
+ p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
+ "y", nb, 0.0, 1.0, False,
+ "z", nb, 0.0, 1.0, False,
+ weight_field = "Temperature")
+ p3d.add_fields(["Ones"])
+ yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
+
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/data_objects/time_series.py
--- a/yt/data_objects/time_series.py
+++ b/yt/data_objects/time_series.py
@@ -90,9 +90,13 @@
... SlicePlot(pf, "x", "Density").save()
"""
- def __init__(self, outputs, parallel = True ,**kwargs):
+ def __init__(self, outputs, parallel = True, setup_function = None,
+ **kwargs):
self.tasks = AnalysisTaskProxy(self)
self.params = TimeSeriesParametersContainer(self)
+ if setup_function is None:
+ setup_function = lambda a: None
+ self._setup_function = setup_function
self._pre_outputs = outputs[:]
for type_name in data_object_registry:
setattr(self, type_name, functools.partial(
@@ -104,7 +108,9 @@
# We can make this fancier, but this works
for o in self._pre_outputs:
if isinstance(o, types.StringTypes):
- yield load(o,**self.kwargs)
+ pf = load(o, **self.kwargs)
+ self._setup_function(pf)
+ yield pf
else:
yield o
@@ -116,7 +122,8 @@
return TimeSeriesData(self._pre_outputs[key], self.parallel)
o = self._pre_outputs[key]
if isinstance(o, types.StringTypes):
- o = load(o,**self.kwargs)
+ o = load(o, **self.kwargs)
+ self._setup_function(o)
return o
def __len__(self):
@@ -163,7 +170,12 @@
This demonstrates how one might store results:
- >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy")
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
+ >>> ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy",
+ ... setup_function = print_time )
+ ...
>>> my_storage = {}
>>> for sto, pf in ts.piter(storage=my_storage):
... v, c = pf.h.find_max("Density")
@@ -215,7 +227,8 @@
return [v for k, v in sorted(return_values.items())]
@classmethod
- def from_filenames(cls, filenames, parallel = True, **kwargs):
+ def from_filenames(cls, filenames, parallel = True, setup_function = None,
+ **kwargs):
r"""Create a time series from either a filename pattern or a list of
filenames.
@@ -239,12 +252,19 @@
this is set to either True or an integer, it will be iterated with
1 or that integer number of processors assigned to each parameter
file provided to the loop.
+ setup_function : callable, accepts a pf
+ This function will be called whenever a parameter file is loaded.
Examples
--------
+ >>> def print_time(pf):
+ ... print pf.current_time
+ ...
>>> ts = TimeSeriesData.from_filenames(
- "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0")
+ ... "GasSloshingLowRes/sloshing_low_res_hdf5_plt_cnt_0[0-6][0-9]0",
+ ... setup_function = print_time)
+ ...
>>> for pf in ts:
... SlicePlot(pf, "x", "Density").save()
@@ -262,7 +282,8 @@
else:
filenames = glob.glob(filenames)
filenames.sort()
- obj = cls(filenames[:], parallel = parallel, **kwargs)
+ obj = cls(filenames[:], parallel = parallel,
+ setup_function = setup_function, **kwargs)
return obj
@classmethod
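As the diff shows, the setup function fires both when iterating the series and when indexing into it; a brief sketch of that contract (the attribute below is hypothetical, for illustration only):

def mark(pf):
    pf._touched = True  # hypothetical attribute, for illustration

ts = TimeSeriesData.from_filenames("DD*/DD*.hierarchy",
                                   setup_function=mark)
pf = ts[0]        # __getitem__ also routes through the setup function
assert pf._touched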
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/extern/progressbar/progressbar.py
--- a/yt/extern/progressbar/progressbar.py
+++ b/yt/extern/progressbar/progressbar.py
@@ -410,7 +410,9 @@
from IPython.display import Javascript, display
# First delete the node that held the progress bar from the page
js = """var element = document.getElementById('%s');
- element.parentNode.removeChild(element);""" % self.uuid
+ var parent = element.parentNode
+ parent.removeChild(element);
+ parent.parentElement.remove();""" % self.uuid
display(Javascript(js))
# Then also remove its trace from the cell output (so it doesn't get
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/fields/particle_fields.py
--- a/yt/fields/particle_fields.py
+++ b/yt/fields/particle_fields.py
@@ -459,3 +459,23 @@
validators=[ValidateParameter("normal"),
ValidateParameter("center")])
+
+def add_particle_average(registry, ptype, field_name,
+ weight = "particle_mass",
+ density = True):
+ def _pfunc_avg(field, data):
+ pos = data[ptype, "Coordinates"]
+ f = data[ptype, field_name]
+ wf = data[ptype, weight]
+ f *= wf
+ v = data.deposit(pos, [f], method = "sum")
+ w = data.deposit(pos, [wf], method = "sum")
+ v /= w
+ if density: v /= data["CellVolume"]
+ v[np.isnan(v)] = 0.0
+ return v
+ fn = ("deposit", "%s_avg_%s" % (ptype, field_name))
+ registry.add_field(fn, function=_pfunc_avg,
+ validators = [ValidateSpatial(0)],
+ particle_type = False)
+ return fn
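Registration returns the new field's key, so call sites can use it immediately; a hedged sketch, where the registry handle and particle field names are assumptions:

# Builds ("deposit", "io_avg_particle_temperature"): the per-cell
# mass-weighted average of the particle field, per unit volume
# when density=True.
fn = add_particle_average(pf.field_info, "io",
                          "particle_temperature",
                          weight="particle_mass",
                          density=False)
ad = pf.h.all_data()
print ad[fn]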
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/fields/universal_fields.py
--- a/yt/fields/universal_fields.py
+++ b/yt/fields/universal_fields.py
@@ -582,12 +582,18 @@
units=r"\rm{s}^{-1}")
def _Contours(field, data):
- return -np.ones_like(data["Ones"])
-add_field("Contours", validators=[ValidateSpatial(0)], take_log=False,
- display_field=False, function=_Contours)
-add_field("tempContours", function=_Contours,
- validators=[ValidateSpatial(0), ValidateGridType()],
- take_log=False, display_field=False)
+ fd = data.get_field_parameter("contour_slices")
+ vals = data["Ones"] * -1
+ if fd is None or fd == 0.0:
+ return vals
+ for sl, v in fd.get(data.id, []):
+ vals[sl] = v
+ return vals
+add_field("Contours", validators=[ValidateSpatial(0)],
+ take_log=False,
+ display_field=False,
+ projection_conversion="1",
+ function=_Contours)
def obtain_velocities(data):
return obtain_rv_vec(data)
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/frontends/athena/data_structures.py
--- a/yt/frontends/athena/data_structures.py
+++ b/yt/frontends/athena/data_structures.py
@@ -26,6 +26,8 @@
StaticOutput
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
+from yt.utilities.lib import \
+ get_box_grids_level
from .fields import AthenaFieldInfo, KnownAthenaFields
from yt.data_objects.field_info_container import \
@@ -109,7 +111,7 @@
self.hierarchy_filename = self.parameter_file.filename
#self.directory = os.path.dirname(self.hierarchy_filename)
self._fhandle = file(self.hierarchy_filename,'rb')
- AMRHierarchy.__init__(self, pf, data_style)
+ GridGeometryHandler.__init__(self, pf, data_style)
self._fhandle.close()
@@ -161,7 +163,7 @@
def _setup_classes(self):
dd = self._get_data_reader_dict()
- AMRHierarchy._setup_classes(self, dd)
+ GridGeometryHandler._setup_classes(self, dd)
self.object_types.sort()
def _count_grids(self):
@@ -264,7 +266,7 @@
# know the extent of all the grids.
glis = np.round((glis - self.parameter_file.domain_left_edge)/gdds).astype('int')
new_dre = np.max(gres,axis=0)
- self.parameter_file.domain_right_edge = np.round(new_dre, decimals=6)
+ self.parameter_file.domain_right_edge = np.round(new_dre, decimals=12)
self.parameter_file.domain_width = \
(self.parameter_file.domain_right_edge -
self.parameter_file.domain_left_edge)
@@ -292,9 +294,9 @@
dxs.append(dx)
dx = np.array(dxs)
- self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=6)
+ self.grid_left_edge = np.round(self.parameter_file.domain_left_edge + dx*glis, decimals=12)
self.grid_dimensions = gdims.astype("int32")
- self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=6)
+ self.grid_right_edge = np.round(self.grid_left_edge + dx*self.grid_dimensions, decimals=12)
if self.parameter_file.dimensionality <= 2:
self.grid_right_edge[:,2] = self.parameter_file.domain_right_edge[2]
if self.parameter_file.dimensionality == 1:
@@ -305,13 +307,33 @@
for g in self.grids:
g._prepare_grid()
g._setup_dx()
+ self._reconstruct_parent_child()
+ """
for g in self.grids:
g.Children = self._get_grid_children(g)
for g1 in g.Children:
g1.Parent.append(g)
+ """
self.max_level = self.grid_levels.max()
+ def _reconstruct_parent_child(self):
+ mask = np.empty(len(self.grids), dtype='int32')
+ mylog.debug("First pass; identifying child grids")
+ for i, grid in enumerate(self.grids):
+ get_box_grids_level(self.grid_left_edge[i,:],
+ self.grid_right_edge[i,:],
+ self.grid_levels[i] + 1,
+ self.grid_left_edge, self.grid_right_edge,
+ self.grid_levels, mask)
+ #ids = np.where(mask.astype("bool")) # where is a tuple
+ #mask[ids] = True
+ grid.Children = [g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1]
+ mylog.debug("Second pass; identifying parents")
+ for i, grid in enumerate(self.grids): # Second pass
+ for child in grid.Children:
+ child.Parent.append(grid)
+
def _get_grid_children(self, grid):
mask = np.zeros(self.num_grids, dtype='bool')
grids, grid_ind = self.get_box_grids(grid.LeftEdge, grid.RightEdge)
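For reference, the test that get_box_grids_level applies per grid reduces to an axis-aligned box overlap plus a level match; a pure-numpy sketch:

import numpy as np

def candidate_children(left, right, level,
                       grid_left, grid_right, grid_levels):
    # Child candidates: boxes that overlap ours and sit one level deeper.
    overlap = np.all(grid_left < right, axis=1) & \
              np.all(grid_right > left, axis=1)
    return np.where(overlap & (grid_levels.ravel() == level + 1))[0]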
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/frontends/athena/fields.py
--- a/yt/frontends/athena/fields.py
+++ b/yt/frontends/athena/fields.py
@@ -78,7 +78,7 @@
def _convertDensity(data) :
return data.convert("Density")
def _density(field, data) :
- return data["density"]
+ return data["density"].copy()
add_field("Density", function=_density, take_log=False,
units=r"\rm{g}/\rm{cm}^3", projected_units=r"\rm{g}/\rm{cm}^2",
convert_function=_convertDensity)
@@ -87,21 +87,21 @@
return data.convert("x-velocity")
def _xvelocity(field, data):
if "velocity_x" in data.pf.field_info:
- return data["velocity_x"]
+ return data["velocity_x"].copy()
else:
return data["momentum_x"]/data["density"]
add_field("x-velocity", function=_xvelocity, take_log=False,
units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
def _yvelocity(field, data):
if "velocity_y" in data.pf.field_info:
- return data["velocity_y"]
+ return data["velocity_y"].copy()
else:
return data["momentum_y"]/data["density"]
add_field("y-velocity", function=_yvelocity, take_log=False,
units=r"\rm{cm}/\rm{s}", convert_function=_convertVelocity)
def _zvelocity(field, data):
if "velocity_z" in data.pf.field_info:
- return data["velocity_z"]
+ return data["velocity_z"].copy()
else:
return data["momentum_z"]/data["density"]
add_field("z-velocity", function=_zvelocity, take_log=False,
@@ -128,7 +128,7 @@
return data.convert("Density")*data.convert("x-velocity")**2
def _pressure(field, data) :
if "pressure" in data.pf.field_info:
- return data["pressure"]
+ return data["pressure"].copy()
else:
eint = data["total_energy"] - 0.5*(data["momentum_x"]**2 +
data["momentum_y"]**2 +
@@ -152,19 +152,19 @@
units=r"\rm{K}")
def _convertBfield(data):
- return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
+ return np.sqrt(4*np.pi*data.convert("Density")*data.convert("x-velocity")**2)
def _Bx(field, data):
- return data['cell_centered_B_x']
+ return data['cell_centered_B_x'].copy()
add_field("Bx", function=_Bx, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_x",
convert_function=_convertBfield)
def _By(field, data):
- return data['cell_centered_B_y']
+ return data['cell_centered_B_y'].copy()
add_field("By", function=_By, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_y",
convert_function=_convertBfield)
def _Bz(field, data):
- return data['cell_centered_B_z']
+ return data['cell_centered_B_z'].copy()
add_field("Bz", function=_Bz, take_log=False,
units=r"\rm{Gauss}", display_name=r"B_z",
convert_function=_convertBfield)
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/frontends/athena/io.py
--- a/yt/frontends/athena/io.py
+++ b/yt/frontends/athena/io.py
@@ -15,6 +15,7 @@
from yt.utilities.io_handler import \
BaseIOHandler
import numpy as np
+from yt.funcs import mylog, defaultdict
class IOHandlerAthena(BaseIOHandler):
_data_style = "athena"
@@ -30,35 +31,45 @@
def _read_field_names(self,grid):
pass
- def _read_data(self,grid,field):
- f = file(grid.filename, 'rb')
- dtype, offsetr = grid.hierarchy._field_map[field]
- grid_ncells = np.prod(grid.ActiveDimensions)
- grid_dims = grid.ActiveDimensions
- grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
- read_table_offset = get_read_table_offset(f)
- if grid_ncells != grid0_ncells:
- offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
- if grid_ncells == grid0_ncells:
- offset = offsetr
- f.seek(read_table_offset+offset)
- if dtype == 'scalar':
- data = np.fromfile(f, dtype='>f4',
- count=grid_ncells).reshape(grid_dims,order='F').copy()
- if dtype == 'vector':
- data = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
- if '_x' in field:
- data = data[0::3].reshape(grid_dims,order='F').copy()
- elif '_y' in field:
- data = data[1::3].reshape(grid_dims,order='F').copy()
- elif '_z' in field:
- data = data[2::3].reshape(grid_dims,order='F').copy()
- f.close()
- if grid.pf.field_ordering == 1:
- return data.T.astype("float64")
- else:
- return data.astype("float64")
-
+ def _read_chunk_data(self,chunk,fields):
+ data = {}
+ grids_by_file = defaultdict(list)
+ if len(chunk.objs) == 0: return data
+ field_list = set(f[1] for f in fields)
+ for grid in chunk.objs:
+ if grid.filename is None:
+ continue
+ f = open(grid.filename, "rb")
+ data[grid.id] = {}
+ grid_ncells = np.prod(grid.ActiveDimensions)
+ grid_dims = grid.ActiveDimensions
+ grid0_ncells = np.prod(grid.hierarchy.grid_dimensions[0,:])
+ read_table_offset = get_read_table_offset(f)
+ for field in self.pf.h.field_list:
+ dtype, offsetr = grid.hierarchy._field_map[field]
+ if grid_ncells != grid0_ncells:
+ offset = offsetr + ((grid_ncells-grid0_ncells) * (offsetr//grid0_ncells))
+ if grid_ncells == grid0_ncells:
+ offset = offsetr
+ f.seek(read_table_offset+offset)
+ if dtype == 'scalar':
+ v = np.fromfile(f, dtype='>f4',
+ count=grid_ncells).reshape(grid_dims,order='F').copy()
+ if dtype == 'vector':
+ v = np.fromfile(f, dtype='>f4', count=3*grid_ncells)
+ if '_x' in field:
+ v = v[0::3].reshape(grid_dims,order='F').copy()
+ elif '_y' in field:
+ v = v[1::3].reshape(grid_dims,order='F').copy()
+ elif '_z' in field:
+ v = v[2::3].reshape(grid_dims,order='F').copy()
+ if grid.pf.field_ordering == 1:
+ data[grid.id][field] = v.T.astype("float64")
+ else:
+ data[grid.id][field] = v.astype("float64")
+ f.close()
+ return data
+
def _read_data_slice(self, grid, field, axis, coord):
sl = [slice(None), slice(None), slice(None)]
sl[axis] = slice(coord, coord + 1)
@@ -66,6 +77,27 @@
sl.reverse()
return self._read_data_set(grid, field)[sl]
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ chunks = list(chunks)
+ if any((ftype != "gas" for ftype, fname in fields)):
+ raise NotImplementedError
+ rv = {}
+ for field in fields:
+ rv[field] = np.empty(size, dtype="float64")
+ ng = sum(len(c.objs) for c in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s grids",
+ size, [f2 for f1, f2 in fields], ng)
+ ind = 0
+ for chunk in chunks:
+ data = self._read_chunk_data(chunk, fields)
+ for g in chunk.objs:
+ for field in fields:
+ ftype, fname = field
+ ds = data[g.id].pop(fname)
+ nd = g.select(selector, ds, rv[field], ind) # caches
+ ind += nd
+ data.pop(g.id)
+ return rv
def get_read_table_offset(f):
line = f.readline()
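The offset bookkeeping in _read_fluid_selection deserves a note: every field of a given grid contributes the same number of selected cells, so the running index advances once per grid. A condensed sketch of the pattern, with the chunk-reading helper assumed:

import numpy as np

def read_fluid_selection(chunks, selector, fields, size, read_chunk):
    # One flat float64 buffer per field, filled grid by grid.
    rv = dict((f, np.empty(size, dtype="float64")) for f in fields)
    ind = 0
    for chunk in chunks:
        data = read_chunk(chunk, fields)   # {grid_id: {fname: array}}
        for g in chunk.objs:
            nd = 0
            for ftype, fname in fields:
                ds = data[g.id].pop(fname)
                # g.select copies selected cells into rv at offset ind
                nd = g.select(selector, ds, rv[(ftype, fname)], ind)
            ind += nd  # same count for every field of this grid
            data.pop(g.id)
    return rv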
diff -r 43e5225bdbf373a8e67b2070fa3dec52a0f3e1f4 -r 24d5c142678f02f6861980624bc6de13de0b7b5e yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -842,16 +842,22 @@
else:
self.current_redshift = self.omega_lambda = self.omega_matter = \
self.hubble_constant = self.cosmological_simulation = 0.0
- self.particle_types = ["io"]
+ self.particle_types = []
if self.parameters["NumberOfParticles"] > 0 and \
"AppendActiveParticleType" in self.parameters.keys():
# If this is the case, then we know we should have a DarkMatter
# particle type, and we don't need the "io" type.
- self.particle_types = ["DarkMatter"]
self.parameters["AppendActiveParticleType"].append("DarkMatter")
+ else:
+ # We do not have an "io" type for Enzo particles if the
+ # ActiveParticle machinery is on, as we simply will ignore any of
+ # the non-DarkMatter particles in that case. However, for older
+ # datasets, we call this particle type "io".
+ self.particle_types = ["io"]
for ptype in self.parameters.get("AppendActiveParticleType", []):
self.particle_types.append(ptype)
self.particle_types = tuple(self.particle_types)
+ self.particle_types_raw = self.particle_types
if self.dimensionality == 1:
self._setup_1d()
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt/commits/edcdd0362274/
Changeset: edcdd0362274
Branch: yt-3.0
User: jzuhone
Date: 2013-12-19 15:16:02
Summary: Missed this
Affected #: 1 file
diff -r 24d5c142678f02f6861980624bc6de13de0b7b5e -r edcdd036227471aa4374cc21e754d3bdd216f80d yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -16,6 +16,7 @@
import base64
import types
+import os
from collections import defaultdict
from functools import wraps
@@ -35,8 +36,9 @@
write_image, apply_colormap
from yt.data_objects.profiles import \
create_profile
-from yt.utilities.lib import \
- write_png_to_string
+if os.name == "posix":
+ from yt.utilities.lib import \
+ write_png_to_string
from yt.data_objects.profiles import \
BinnedProfile1D, \
BinnedProfile2D
https://bitbucket.org/yt_analysis/yt/commits/50696f2cc502/
Changeset: 50696f2cc502
Branch: yt-3.0
User: jzuhone
Date: 2013-12-27 04:56:56
Summary: Merging
Affected #: 4 files
diff -r edcdd036227471aa4374cc21e754d3bdd216f80d -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 yt/frontends/ramses/api.py
--- a/yt/frontends/ramses/api.py
+++ b/yt/frontends/ramses/api.py
@@ -22,3 +22,6 @@
from .io import \
IOHandlerRAMSES
+
+from .definitions import \
+ field_aliases
diff -r edcdd036227471aa4374cc21e754d3bdd216f80d -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -13,6 +13,7 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+import os
import numpy as np
import stat
import weakref
@@ -28,7 +29,7 @@
from yt.data_objects.octree_subset import \
OctreeSubset
-from .definitions import ramses_header
+from .definitions import ramses_header, field_aliases
from yt.utilities.definitions import \
mpc_conversion, sec_conversion
from yt.utilities.lib import \
@@ -52,10 +53,10 @@
_last_mask = None
_last_selector_id = None
- def __init__(self, pf, domain_id, nvar):
- self.nvar = nvar
+ def __init__(self, pf, domain_id):
self.pf = pf
self.domain_id = domain_id
+ self.nvar = 0 # Set this later!
num = os.path.basename(pf.parameter_filename).split("."
)[0].split("_")[1]
basename = "%s/%%s_%s.out%05i" % (
@@ -65,6 +66,7 @@
for t in ['grav', 'hydro', 'part', 'amr']:
setattr(self, "%s_fn" % t, basename % t)
self._read_amr_header()
+ self._read_hydro_header()
self._read_particle_header()
self._read_amr()
@@ -102,9 +104,9 @@
hvals = fpu.read_attrs(f, header, "=")
except AssertionError:
print "You are running with the wrong number of fields."
- print "Please specify these in the load command."
- print "We are looking for %s fields." % self.nvar
- print "The last set of field sizes was: %s" % skipped
+ print "If you specified these in the load command, check the array length."
+ print "In this file there are %s hydro fields." % skipped
+ #print "The last set of field sizes was: %s" % skipped
raise
if hvals['file_ncache'] == 0: continue
assert(hvals['file_ilevel'] == level+1)
@@ -116,6 +118,13 @@
self._level_count = level_count
return self._hydro_offset
+ def _read_hydro_header(self):
+ if self.nvar > 0: return self.nvar
+ # Read the number of hydro variables
+ f = open(self.hydro_fn, "rb")
+ fpu.skip(f, 1)
+ self.nvar = fpu.read_vector(f, "i")[0]
+
def _read_particle_header(self):
if not os.path.exists(self.part_fn):
self.local_particle_count = 0
@@ -320,6 +329,7 @@
class RAMSESGeometryHandler(OctreeGeometryHandler):
def __init__(self, pf, data_style='ramses'):
+ self._pf = pf # TODO: Figure out the class composition better!
self.fluid_field_list = pf._fields_in_file
self.data_style = data_style
self.parameter_file = weakref.proxy(pf)
@@ -332,8 +342,7 @@
super(RAMSESGeometryHandler, self).__init__(pf, data_style)
def _initialize_oct_handler(self):
- nv = len(self.fluid_field_list)
- self.domains = [RAMSESDomainFile(self.parameter_file, i + 1, nv)
+ self.domains = [RAMSESDomainFile(self.parameter_file, i + 1)
for i in range(self.parameter_file['ncpu'])]
total_octs = sum(dom.local_oct_count #+ dom.ngridbound.sum()
for dom in self.domains)
@@ -341,14 +350,69 @@
self.num_grids = total_octs
def _detect_fields(self):
- # TODO: Add additional fields
+ # Do we want to attempt to figure out what the fields are in the file?
pfl = set([])
+ if self.fluid_field_list is None or len(self.fluid_field_list) <= 0:
+ self._setup_auto_fields()
for domain in self.domains:
pfl.update(set(domain.particle_field_offsets.keys()))
self.particle_field_list = list(pfl)
self.field_list = [("gas", f) for f in self.fluid_field_list] \
+ self.particle_field_list
+ def _setup_auto_fields(self):
+ '''
+ If no fluid fields are set, the code tries to set up a fluids array by hand
+ '''
+ # TODO: SUPPORT RT - THIS REQUIRES IMPLEMENTING A NEW FILE READER!
+ # Find nvar
+ # TODO: copy/pasted from DomainFile; needs refactoring!
+ num = os.path.basename(self._pf.parameter_filename).split("."
+ )[0].split("_")[1]
+ testdomain = 1 # Just pick the first domain file to read
+ basename = "%s/%%s_%s.out%05i" % (
+ os.path.abspath(
+ os.path.dirname(self._pf.parameter_filename)),
+ num, testdomain)
+ hydro_fn = basename % "hydro"
+ # Do we have a hydro file?
+ if hydro_fn:
+ # Read the number of hydro variables
+ f = open(hydro_fn, "rb")
+ fpu.skip(f, 1)
+ nvar = fpu.read_vector(f, "i")[0]
+ # OK, we got NVAR, now set up the arrays depending on what NVAR is
+ # Allow some wiggle room for users to add too many variables
+ if nvar < 5:
+ mylog.debug("nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES", nvar)
+ raise ValueError
+ # Basic hydro runs
+ if nvar == 5:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure"]
+ if nvar > 5 and nvar < 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "Pressure", "Metallicity"]
+ # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE
+ if nvar == 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure"]
+ if nvar > 11:
+ fields = ["Density",
+ "x-velocity", "y-velocity", "z-velocity",
+ "x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
+ "x-Bfield-right", "y-Bfield-right", "z-Bfield-right",
+ "Pressure","Metallicity"]
+ while len(fields) < nvar:
+ fields.append("var"+str(len(fields)))
+ mylog.debug("No fields specified by user; automatically setting fields array to %s", str(fields))
+ self.fluid_field_list = fields
+
def _setup_derived_fields(self):
self._parse_cooling()
super(RAMSESGeometryHandler, self)._setup_derived_fields()
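Condensing the branch ladder above, the nvar-to-fields mapping works out to the following sketch, matching the diff's choices:

def fields_for_nvar(nvar):
    base = ["Density", "x-velocity", "y-velocity", "z-velocity"]
    mhd = ["x-Bfield-left", "y-Bfield-left", "z-Bfield-left",
           "x-Bfield-right", "y-Bfield-right", "z-Bfield-right"]
    if nvar < 5:
        raise ValueError("1D/2D RAMSES runs are not supported")
    elif nvar == 5:
        fields = base + ["Pressure"]
    elif nvar < 11:
        fields = base + ["Pressure", "Metallicity"]
    elif nvar == 11:
        fields = base + mhd + ["Pressure"]
    else:
        fields = base + mhd + ["Pressure", "Metallicity"]
    while len(fields) < nvar:      # pad extras as var6, var7, ...
        fields.append("var" + str(len(fields)))
    return fields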
@@ -404,12 +468,14 @@
_particle_coordinates_name = "Coordinates"
def __init__(self, filename, data_style='ramses',
- fields = None,
- storage_filename = None):
+ fields = None, storage_filename = None):
# Here we want to initiate a traceback, if the reader is not built.
- if fields is None:
- fields = ["Density", "x-velocity", "y-velocity",
- "z-velocity", "Pressure", "Metallicity"]
+ if isinstance(fields, types.StringTypes):
+ fields = field_aliases[fields]
+ '''
+ fields: An array of hydro variable fields in order of position in the hydro_XXXXX.outYYYYY file
+ If set to None, will try a default set of fields
+ '''
self._fields_in_file = fields
StaticOutput.__init__(self, filename, data_style)
self.storage_filename = storage_filename
@@ -522,4 +588,3 @@
if not os.path.basename(args[0]).startswith("info_"): return False
fn = args[0].replace("info_", "amr_").replace(".txt", ".out00001")
return os.path.exists(fn)
-
diff -r edcdd036227471aa4374cc21e754d3bdd216f80d -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 yt/frontends/ramses/definitions.py
--- a/yt/frontends/ramses/definitions.py
+++ b/yt/frontends/ramses/definitions.py
@@ -28,6 +28,17 @@
('nout', 3, 'I')
)
yield header
+ # TODO: REMOVE
+ '''
+ hydro_header = ( ('ncpu', 1, 'i'),
+ ('nvar', 1, 'i'),
+ ('ndim', 1, 'i'),
+ ('nlevelmax', 1, 'i'),
+ ('nboundary', 1, 'i'),
+ ('gamma', 1, 'd')
+ )
+ yield hydro_header
+ '''
noutput, iout, ifout = hvals['nout']
next_set = ( ('tout', noutput, 'd'),
('aout', noutput, 'd'),
@@ -45,3 +56,18 @@
('numbl', hvals['nlevelmax'] * hvals['ncpu'], 'i'),
)
yield tree_header
+
+field_aliases = {
+ 'standard_five': ('Density',
+ 'x-velocity',
+ 'y-velocity',
+ 'z-velocity',
+ 'Pressure'),
+ 'standard_six': ('Density',
+ 'x-velocity',
+ 'y-velocity',
+ 'z-velocity',
+ 'Pressure',
+ 'Metallicity'),
+
+}
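With these aliases, the RAMSES loader's fields argument accepts a shorthand string in place of an explicit tuple; the dataset path below is hypothetical:

# Equivalent to spelling out the six-field tuple by hand
pf = load("output_00080/info_00080.txt", fields="standard_six")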
diff -r edcdd036227471aa4374cc21e754d3bdd216f80d -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -146,7 +146,7 @@
fields = self.plots.keys()
else:
fields = [field]
- for field in fields:
+ for field in self._field_check(fields):
if self._field_transform[field] == log_transform:
log[field] = True
else:
@@ -179,7 +179,7 @@
fields = self.plots.keys()
else:
fields = [field]
- for field in fields:
+ for field in self._field_check(fields):
self._colorbar_valid = False
self._colormaps[field] = cmap_name
return self
@@ -214,7 +214,7 @@
fields = self.plots.keys()
else:
fields = [field]
- for field in fields:
+ for field in self._field_check(fields):
myzmin = zmin
myzmax = zmax
if zmin == 'min':
https://bitbucket.org/yt_analysis/yt/commits/7e6d035b471c/
Changeset: 7e6d035b471c
Branch: yt-3.0
User: jzuhone
Date: 2013-12-31 02:03:18
Summary: First crack at de-cythonizing png_writer
Affected #: 9 files
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -19,7 +19,8 @@
from yt.visualization.image_writer import apply_colormap
from yt.visualization.fixed_resolution import FixedResolutionBuffer
-from yt.utilities.lib import write_png_to_string, get_color_bounds
+from yt.utilities.lib import get_color_bounds
+from yt.utilities.png_writer import write_png_to_string
import yt.extern.bottle as bottle
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -21,8 +21,7 @@
from .Interpolators import *
from .misc_utilities import *
from .Octree import *
-if os.name == "posix":
- from .png_writer import *
+from .image_utilities import *
from .PointsInVolume import *
from .QuadTree import *
from .RayIntegrators import *
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/utilities/lib/image_utilities.pyx
--- /dev/null
+++ b/yt/utilities/lib/image_utilities.pyx
@@ -0,0 +1,39 @@
+"""
+Utilities for images
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from fp_utils cimport iclip
+
+def add_points_to_image(
+ np.ndarray[np.uint8_t, ndim=3] buffer,
+ np.ndarray[np.float64_t, ndim=1] px,
+ np.ndarray[np.float64_t, ndim=1] py,
+ np.float64_t pv):
+ cdef int i, j, k, pi
+ cdef int np = px.shape[0]
+ cdef int xs = buffer.shape[0]
+ cdef int ys = buffer.shape[1]
+ cdef int v
+ v = iclip(<int>(pv * 255), 0, 255)
+ for pi in range(np):
+ j = <int> (xs * px[pi])
+ i = <int> (ys * py[pi])
+ for k in range(3):
+ buffer[i, j, k] = 0
+ return
+ #for i in range(xs):
+ # for j in range(ys):
+ # for k in range(3):
+ # v = buffer[i, j, k]
+ # buffer[i, j, k] = iclip(v, 0, 255)
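One caveat in add_points_to_image: cdef int np shadows the numpy module alias for the rest of the function, which happens to be safe here only because numpy is not referenced after that line. A plain-Python sketch of the same kernel with the shadowing avoided:

def add_points_to_image(buf, px, py, pv):
    # Stamp black pixels at normalized (px, py) positions in an RGB(A) buffer.
    n_points = px.shape[0]  # renamed to avoid shadowing numpy's alias
    xs, ys = buf.shape[0], buf.shape[1]
    for pi in range(n_points):
        j = int(xs * px[pi])
        i = int(ys * py[pi])
        buf[i, j, 0:3] = 0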
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/utilities/lib/png_writer.pyx
--- a/yt/utilities/lib/png_writer.pyx
+++ /dev/null
@@ -1,317 +0,0 @@
-"""
-A light interface to libpng
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-cimport numpy as np
-cimport cython
-from libc.stdlib cimport malloc, realloc, free
-from libc.string cimport memcpy
-from cpython.string cimport PyString_FromStringAndSize
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-
-from libc.stdio cimport fopen, fclose, FILE
-
-cdef extern from "stdlib.h":
- # NOTE that size_t might not be int
- void *alloca(int)
-
-# Idiom for accessing Python files, from Cython FAQ
-# First, declare the Python macro to access files:
-cdef extern from "Python.h":
- ctypedef struct FILE
- FILE* PyFile_AsFile(object)
- void fprintf(FILE* f, char* s, char* s)
-
-cdef extern from "png.h":
- ctypedef unsigned long png_uint_32
- ctypedef long png_int_32
- ctypedef unsigned short png_uint_16
- ctypedef short png_int_16
- ctypedef unsigned char png_byte
- ctypedef void *png_voidp
- ctypedef png_byte *png_bytep
- ctypedef png_uint_32 *png_uint_32p
- ctypedef png_int_32 *png_int_32p
- ctypedef png_uint_16 *png_uint_16p
- ctypedef png_int_16 *png_int_16p
- ctypedef char *png_charp
- ctypedef char *png_const_charp
- ctypedef FILE *png_FILE_p
-
- ctypedef struct png_struct:
- png_voidp io_ptr
- ctypedef png_struct *png_structp
-
- ctypedef struct png_info:
- pass
- ctypedef png_info *png_infop
-
- ctypedef struct png_color_8:
- png_byte red
- png_byte green
- png_byte blue
- png_byte gray
- png_byte alpha
- ctypedef png_color_8 *png_color_8p
-
- cdef png_const_charp PNG_LIBPNG_VER_STRING
-
- # Note that we don't support error or warning functions
- png_structp png_create_write_struct(
- png_const_charp user_png_ver, png_voidp error_ptr,
- void *error_fn, void *warn_fn)
-
- png_infop png_create_info_struct(png_structp png_ptr)
-
- void png_init_io(png_structp png_ptr, png_FILE_p fp)
-
- void png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
- png_uint_32 width, png_uint_32 height, int bit_depth,
- int color_type, int interlace_method, int compression_method,
- int filter_method)
-
- cdef int PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE
- cdef int PNG_COLOR_TYPE_GRAY, PNG_INTERLACE_ADAM7
- cdef int PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE
-
- void png_set_pHYs(png_structp png_ptr, png_infop info_ptr,
- png_uint_32 res_x, png_uint_32 res_y, int unit_type)
-
- cdef int PNG_RESOLUTION_METER
-
- void png_set_sBIT(png_structp png_ptr, png_infop info_ptr,
- png_color_8p sig_bit)
-
- ctypedef void (*png_rw_ptr) (png_structp, png_bytep, size_t)
- ctypedef void (*png_flush_ptr) (png_structp)
- void png_set_write_fn(png_structp png_ptr, png_voidp io_ptr,
- png_rw_ptr write_data_fn,
- png_flush_ptr output_flush_fn)
- png_voidp png_get_io_ptr (png_structp png_ptr)
-
- void png_write_info(png_structp png_ptr, png_infop info_ptr)
- void png_set_rows(png_structp png_ptr, png_infop info_ptr,
- png_bytep *row_pointers)
- void png_write_image(png_structp png_ptr, png_bytep *image)
- void png_write_end(png_structp png_ptr, png_infop info_ptr)
- void png_write_png(png_structp png_ptr, png_infop info_ptr,
- int transforms, png_voidp params)
- cdef int PNG_TRANSFORM_IDENTITY
-
- void png_destroy_write_struct(
- png_structp *png_ptr_ptr, png_infop *info_ptr_ptr)
-
-def write_png_to_file(np.ndarray[np.uint8_t, ndim=3] buffer,
- object py_fileobj, int dpi=100,
- int close = 0):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
- cdef FILE *fileobj = PyFile_AsFile(py_fileobj)
-
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- png_init_io(png_ptr, fileobj)
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- png_write_info(png_ptr, info_ptr)
- png_write_image(png_ptr, row_pointers)
- png_write_end(png_ptr, info_ptr)
-
- if close == 1: fclose(fileobj)
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
-def write_png(np.ndarray[np.uint8_t, ndim=3] buffer,
- char *filename, int dpi=100):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
-
- cdef FILE* fileobj = fopen(filename, "wb")
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- png_init_io(png_ptr, fileobj)
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- png_write_info(png_ptr, info_ptr)
- png_write_image(png_ptr, row_pointers)
- png_write_end(png_ptr, info_ptr)
-
- fclose(fileobj)
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
-
-# Much of this is inspired by and translated from this StackOverflow question:
-# http://stackoverflow.com/questions/1821806/how-to-encode-png-to-buffer-using-libpng
-
-cdef public struct mem_encode:
- char *buffer
- size_t size
-
-cdef public void my_png_write_data(png_structp png_ptr, png_bytep data,
- size_t length):
- cdef png_voidp temp = png_get_io_ptr(png_ptr)
- cdef mem_encode *p = <mem_encode *> temp
- cdef size_t nsize = p.size + length
- if p.buffer != NULL:
- p.buffer = <char *> realloc(p.buffer, nsize)
- else:
- p.buffer = <char *> malloc(nsize)
- memcpy(p.buffer + p.size, data, length)
- p.size += length
-
-cdef public void my_png_flush(png_structp png_ptr):
- return
-
-def write_png_to_string(np.ndarray[np.uint8_t, ndim=3] buffer, int dpi=100,
- int gray = 0):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
-
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- cdef int im_type, interlace_type
- interlace_type = PNG_INTERLACE_NONE
- if gray == 0:
- im_type = PNG_COLOR_TYPE_RGB_ALPHA
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
- elif gray == 1:
- im_type = PNG_COLOR_TYPE_GRAY
- sig_bit.gray = 8
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 0
- else:
- raise RuntimeError
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- im_type, interlace_type,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- cdef mem_encode state
- state.buffer = NULL
- state.size = 0
-
- png_set_write_fn(png_ptr, <png_voidp> &state, my_png_write_data, NULL)
- png_set_rows(png_ptr, info_ptr, row_pointers)
- png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL)
-
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
- pp = PyString_FromStringAndSize(state.buffer, state.size)
- if state.buffer != NULL: free(state.buffer)
- return pp
-
-def add_points_to_image(
- np.ndarray[np.uint8_t, ndim=3] buffer,
- np.ndarray[np.float64_t, ndim=1] px,
- np.ndarray[np.float64_t, ndim=1] py,
- np.float64_t pv):
- cdef int i, j, k, pi
- cdef int np = px.shape[0]
- cdef int xs = buffer.shape[0]
- cdef int ys = buffer.shape[1]
- cdef int v
- v = iclip(<int>(pv * 255), 0, 255)
- for pi in range(np):
- j = <int> (xs * px[pi])
- i = <int> (ys * py[pi])
- for k in range(3):
- buffer[i, j, k] = 0
- return
- #for i in range(xs):
- # for j in range(ys):
- # for k in range(3):
- # v = buffer[i, j, k]
- # buffer[i, j, k] = iclip(v, 0, 255)
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -5,11 +5,6 @@
from yt.utilities.setup import \
check_for_dependencies
-
-def check_for_png():
- return check_for_dependencies("PNG_DIR", "png.cfg", "png.h", "png")
-
-
def check_for_openmp():
# Create a temporary directory
tmpdir = tempfile.mkdtemp()
@@ -103,14 +98,10 @@
config.add_extension("Octree",
["yt/utilities/lib/Octree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
- if os.name == "posix":
- config.add_extension("png_writer",
- ["yt/utilities/lib/png_writer.pyx"],
- define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
- include_dirs=[png_inc],
- library_dirs=[png_lib],
- libraries=["m", "png"],
- depends=["yt/utilities/lib/fp_utils.pxd"]),
+ config.add_extension("image_utilities",
+ ["yt/utilities/lib/image_utilities.pyx"],
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd"]),
config.add_extension("PointsInVolume",
["yt/utilities/lib/PointsInVolume.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -287,7 +287,7 @@
self.transport = transport
def __call__(self, val):
- from yt.utilities.lib import write_png_to_string
+ from yt.utilities.png_writer import write_png_to_string
from yt.visualization.image_writer import map_to_colors
image = np.log10(val)
mi = np.nanmin(image[~np.isinf(image)])
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -19,6 +19,7 @@
from yt.funcs import *
import _colormap_data as cmd
import yt.utilities.lib as au
+import yt.utilities.png_writer as pw
import __builtin__
def scale_image(image, mi=None, ma=None):
@@ -106,7 +107,7 @@
alpha_channel = scale_image(alpha_channel)
image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
image = image.transpose().copy() # Have to make sure it's contiguous
- au.write_png(image, fn)
+ pw.write_png(image, fn)
def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
r"""Write out a bitmapped image directly to a PNG file.
@@ -151,9 +152,9 @@
if transpose:
bitmap_array = bitmap_array.swapaxes(0,1)
if filename is not None:
- au.write_png(bitmap_array.copy(), filename)
+ pw.write_png(bitmap_array.copy(), filename)
else:
- return au.write_png_to_string(bitmap_array.copy())
+ return pw.write_png_to_string(bitmap_array.copy())
return bitmap_array
def write_image(image, filename, color_bounds = None, cmap_name = "algae", func = lambda x: x):
@@ -195,7 +196,7 @@
mylog.info("Using only channel 1 of supplied image")
image = image[:,:,0]
to_plot = apply_colormap(image, color_bounds = color_bounds, cmap_name = cmap_name)
- au.write_png(to_plot, filename)
+ pw.write_png(to_plot, filename)
return to_plot
def apply_colormap(image, color_bounds = None, cmap_name = 'algae', func=lambda x: x):
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -41,8 +41,7 @@
from yt.funcs import \
mylog, iterable, ensure_list, fix_axis, \
get_ipython_api_version
-if os.name == "posix":
- from yt.utilities.lib import write_png_to_string
+from yt.utilities.png_writer import write_png_to_string
from yt.utilities.definitions import \
x_dict, y_dict, \
axis_names, axis_labels, \
diff -r 50696f2cc502664b2d967da7072ca9b98aecfdc5 -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -36,9 +36,7 @@
write_image, apply_colormap
from yt.data_objects.profiles import \
create_profile
-if os.name == "posix":
- from yt.utilities.lib import \
- write_png_to_string
+from yt.utilities.png_writer import write_png_to_string
from yt.data_objects.profiles import \
BinnedProfile1D, \
BinnedProfile2D
https://bitbucket.org/yt_analysis/yt/commits/7d5d150c48b7/
Changeset: 7d5d150c48b7
Branch: yt-3.0
User: jzuhone
Date: 2013-12-31 02:31:00
Summary: Removing check_for_png
Affected #: 1 file
diff -r 7e6d035b471ca84b4a4eb2d2091d368bc0e627aa -r 7d5d150c48b70242b075eeda7f98a032cb15b59f yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -46,7 +46,6 @@
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lib',parent_package,top_path)
- png_inc, png_lib = check_for_png()
if check_for_openmp() == True:
omp_args = ['-fopenmp']
else:
https://bitbucket.org/yt_analysis/yt/commits/45907902a4f4/
Changeset: 45907902a4f4
Branch: yt-3.0
User: jzuhone
Date: 2013-12-31 03:00:07
Summary: Forgot to add the new png_writer module
Affected #: 1 file
diff -r 7d5d150c48b70242b075eeda7f98a032cb15b59f -r 45907902a4f49045dfa165040ae32111b9c7b347 yt/utilities/png_writer.py
--- /dev/null
+++ b/yt/utilities/png_writer.py
@@ -0,0 +1,29 @@
+"""
+Writing PNGs
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import matplotlib._png as _png
+import StringIO
+
+def write_png(buffer, filename, dpi=100):
+ width = buffer.shape[1]
+ height = buffer.shape[0]
+ _png.write_png(buffer, width, height, filename, dpi)
+
+def write_png_to_string(buffer, dpi=100, gray=0):
+ width = buffer.shape[1]
+ height = buffer.shape[0]
+ fileobj = StringIO.StringIO()
+ _png.write_png(buffer, width, height, fileobj, dpi)
+ png_str = fileobj.getvalue()
+ fileobj.close()
+ return png_str
+
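With the Cython module gone, PNG encoding is delegated to matplotlib's bundled _png extension, so yt no longer has to locate and link libpng at build time. Note that the gray argument of write_png_to_string is kept for API compatibility but is no longer acted on by the new implementation. A short usage sketch:

    import numpy as np
    from yt.utilities.png_writer import write_png, write_png_to_string

    # A random RGBA buffer.
    buf = np.random.randint(0, 256, (32, 32, 4)).astype("uint8")
    write_png(buf, "test.png", dpi=100)    # write straight to disk
    png_bytes = write_png_to_string(buf)   # in-memory PNG string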
https://bitbucket.org/yt_analysis/yt/commits/b45ffffaddf3/
Changeset: b45ffffaddf3
Branch: yt-3.0
User: jzuhone
Date: 2014-01-01 00:31:22
Summary: Fix integer dtype problems causing test failures on Windows, where NumPy's default integer type is only 32 bits wide
Affected #: 5 files
diff -r 45907902a4f49045dfa165040ae32111b9c7b347 -r b45ffffaddf3f033823885a0f5589fe42b95f928 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -22,7 +22,7 @@
from yt.utilities.lib import \
VoxelTraversal, planar_points_in_volume, find_grids_in_inclined_box, \
grid_points_in_volume
-from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace
+from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
from yt.utilities.orientation import Orientation
from .data_containers import \
YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
diff -r 45907902a4f49045dfa165040ae32111b9c7b347 -r b45ffffaddf3f033823885a0f5589fe42b95f928 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -71,7 +71,7 @@
def add_grids(self, grids):
gles = np.array([g.LeftEdge for g in grids])
gres = np.array([g.RightEdge for g in grids])
- gids = np.array([g.id for g in grids])
+ gids = np.array([g.id for g in grids]).astype("int64")
add_pygrids(self.trunk, gids.size, gles, gres, gids,
self.comm_rank, self.comm_size)
del gles, gres, gids, grids
diff -r 45907902a4f49045dfa165040ae32111b9c7b347 -r b45ffffaddf3f033823885a0f5589fe42b95f928 yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -65,7 +65,7 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
-def clyindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1,
+def cylindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1,
np.ndarray[np.float64_t, ndim=1] p2,
np.ndarray[np.float64_t, ndim=2] left_edges,
np.ndarray[np.float64_t, ndim=2] right_edges):
@@ -152,7 +152,7 @@
np.argwhere(tmmright).flat,
np.argwhere(tpmright).flat,]))
if 0 == inds.shape[0]:
- inds = np.arange(I)
+ inds = np.arange(np.int64(I))
thetaleft = np.empty(I)
thetaleft.fill(p1[2])
thetaright = np.empty(I)
diff -r 45907902a4f49045dfa165040ae32111b9c7b347 -r b45ffffaddf3f033823885a0f5589fe42b95f928 yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -8,7 +8,7 @@
from numpy.testing import assert_array_equal, assert_array_almost_equal
from yt.testing import amrspace
-from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace, _cyl2cart
+from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace, _cyl2cart
left_grid = right_grid = amr_levels = center_grid = data = None
@@ -57,13 +57,13 @@
assert_true(np.all(bupper >= arr))
-def test_clyindrical_ray_trace():
+def test_cylindrical_ray_trace():
for pair in point_pairs:
p1, p2 = pair
p1cart, p2cart = _cyl2cart(pair)
pathlen = np.sqrt(np.sum((p2cart - p1cart)**2))
- t, s, rztheta, inds = clyindrical_ray_trace(p1, p2, left_grid, right_grid)
+ t, s, rztheta, inds = cylindrical_ray_trace(p1, p2, left_grid, right_grid)
npoints = len(t)
yield check_monotonic_inc, t
diff -r 45907902a4f49045dfa165040ae32111b9c7b347 -r b45ffffaddf3f033823885a0f5589fe42b95f928 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -211,9 +211,9 @@
y_vals = data_object[self.y_name].ravel().astype('float64')
z_vals = data_object[self.z_name].ravel().astype('float64')
- x_i = np.digitize(x_vals, self.x_bins) - 1
- y_i = np.digitize(y_vals, self.y_bins) - 1
- z_i = np.digitize(z_vals, self.z_bins) - 1
+ x_i = np.digitize(x_vals, self.x_bins).astype("int32") - 1
+ y_i = np.digitize(y_vals, self.y_bins).astype("int32") - 1
+ z_i = np.digitize(z_vals, self.z_bins).astype("int32") - 1
if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
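The astype calls in this changeset pin down integer widths because NumPy's default integer is the platform's C long: 64-bit on most Unix builds but 32-bit on Windows (an LLP64 platform), even under 64-bit Python. Arrays produced with the default type therefore change width across platforms, and Cython buffer declarations with a fixed width reject them on one platform or the other; casting explicitly to the width the extension expects works everywhere. To see the platform dependence:

    import numpy as np

    bins = np.linspace(0.0, 1.0, 8)
    idx = np.digitize(np.random.random(16), bins)
    print(idx.dtype)            # int64 on Linux/macOS, int32 on Windows
    idx = idx.astype("int64")   # explicit width, portable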
https://bitbucket.org/yt_analysis/yt/commits/73f539f7930a/
Changeset: 73f539f7930a
Branch: yt-3.0
User: jzuhone
Date: 2014-01-01 00:31:32
Summary: Merging
Affected #: 20 files
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -32,7 +32,10 @@
BinnedProfile1D, \
BinnedProfile2D, \
BinnedProfile3D, \
- create_profile
+ create_profile, \
+ Profile1D, \
+ Profile2D, \
+ Profile3D
from time_series import \
TimeSeriesData, \
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -503,7 +503,16 @@
continue
fd = self.pf.field_dependencies.get(field, None) or \
self.pf.field_dependencies.get(field[1], None)
- if fd is None: continue
+ # This is long overdue. Any time we *can't* find a field
+ # dependency -- for instance, if the derived field has been added
+ # after parameter file instantiation -- let's just try to
+ # recalculate it.
+ if fd is None:
+ try:
+ fd = fi.get_dependencies(pf = self.pf)
+ self.pf.field_dependencies[field] = fd
+ except:
+ continue
requested = self._determine_fields(list(set(fd.requested)))
deps = [d for d in requested if d not in fields_to_get]
fields_to_get += deps
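The new fallback is a compute-on-miss-and-cache: when a derived field was added after parameter file instantiation and so has no recorded dependencies, they are recomputed once and stored back in pf.field_dependencies, and later lookups hit the cache. The same pattern in isolation (a sketch, mirroring the bare except in the hunk above):

    def lookup_deps(cache, field, compute):
        # Compute on miss, memoize on success, give up quietly on failure.
        fd = cache.get(field)
        if fd is None:
            try:
                fd = compute(field)
                cache[field] = fd
            except Exception:
                return None
        return fd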
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -757,8 +757,10 @@
def add_fields(self, fields):
fields = ensure_list(fields)
temp_storage = ProfileFieldAccumulator(len(fields), self.size)
- for g in parallel_objects(self.data_source._grids):
- self._bin_grid(g, fields, temp_storage)
+ cfields = fields + list(self.bin_fields)
+ citer = self.data_source.chunks(cfields, "io")
+ for chunk in parallel_objects(citer):
+ self._bin_chunk(chunk, fields, temp_storage)
self._finalize_storage(fields, temp_storage)
def _finalize_storage(self, fields, temp_storage):
@@ -772,42 +774,35 @@
self.field_data[field] = temp_storage.values[...,i]
self.field_data[field][blank] = 0.0
- def _bin_grid(self, grid, fields, storage):
+ def _bin_chunk(self, chunk, fields, storage):
raise NotImplementedError
- def _filter(self, bin_fields, cut_points):
- # cut_points is initially just the points inside our region
+ def _filter(self, bin_fields):
+ # cut_points is set to be everything initially, but
# we also want to apply a filtering based on min/max
- filter = np.zeros(bin_fields[0].shape, dtype='bool')
- filter[cut_points] = True
+ filter = np.ones(bin_fields[0].shape, dtype='bool')
for (mi, ma), data in zip(self.bounds, bin_fields):
filter &= (data > mi)
filter &= (data < ma)
return filter, [data[filter] for data in bin_fields]
- def _get_data(self, grid, fields):
- # Save the values in the grid beforehand.
- old_params = grid.field_parameters
- old_keys = grid.field_data.keys()
- grid.field_parameters = self.data_source.field_parameters
- # Now we ask our source which values to include
- pointI = self.data_source._get_point_indices(grid)
- bin_fields = [grid[bf] for bf in self.bin_fields]
+ def _get_data(self, chunk, fields):
+ # We are using chunks now, which will manage the field parameters and
+ # the like.
+ bin_fields = [chunk[bf] for bf in self.bin_fields]
# We want to make sure that our fields are within the bounds of the
# binning
- filter, bin_fields = self._filter(bin_fields, pointI)
+ filter, bin_fields = self._filter(bin_fields)
if not np.any(filter): return None
arr = np.zeros((bin_fields[0].size, len(fields)), dtype="float64")
for i, field in enumerate(fields):
- arr[:,i] = grid[field][filter]
+ arr[:,i] = chunk[field][filter]
if self.weight_field is not None:
- weight_data = grid[self.weight_field]
+ weight_data = chunk[self.weight_field]
else:
- weight_data = np.ones(grid.ActiveDimensions, dtype="float64")
+ weight_data = np.ones(chunk.ires.size, dtype="float64")
weight_data = weight_data[filter]
# So that we can pass these into
- grid.field_parameters = old_params
- grid.field_data = YTFieldData( [(k, grid.field_data[k]) for k in old_keys] )
return arr, weight_data, bin_fields
def __getitem__(self, key):
@@ -835,10 +830,10 @@
self.bounds = ((self.x_bins[0], self.x_bins[-1]),)
self.x = self.x_bins
- def _bin_grid(self, grid, fields, storage):
- gd = self._get_data(grid, fields)
- if gd is None: return
- fdata, wdata, (bf_x,) = gd
+ def _bin_chunk(self, chunk, fields, storage):
+ rv = self._get_data(chunk, fields)
+ if rv is None: return
+ fdata, wdata, (bf_x,) = rv
bin_ind = np.digitize(bf_x, self.x_bins) - 1
new_bin_profile1d(bin_ind, wdata, fdata,
storage.weight_values, storage.values,
@@ -867,8 +862,8 @@
self.x = self.x_bins
self.y = self.y_bins
- def _bin_grid(self, grid, fields, storage):
- rv = self._get_data(grid, fields)
+ def _bin_chunk(self, chunk, fields, storage):
+ rv = self._get_data(chunk, fields)
if rv is None: return
fdata, wdata, (bf_x, bf_y) = rv
bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -912,8 +907,8 @@
self.y = self.y_bins
self.z = self.z_bins
- def _bin_grid(self, grid, fields, storage):
- rv = self._get_data(grid, fields)
+ def _bin_chunk(self, chunk, fields, storage):
+ rv = self._get_data(chunk, fields)
if rv is None: return
fdata, wdata, (bf_x, bf_y, bf_z) = rv
bin_ind_x = np.digitize(bf_x, self.x_bins) - 1
@@ -983,11 +978,15 @@
cls = Profile3D
else:
raise NotImplementedError
+ bin_fields = data_source._determine_fields(bin_fields)
+ fields = data_source._determine_fields(fields)
+ if weight_field is not None:
+ weight_field, = data_source._determine_fields([weight_field])
if not iterable(n):
n = [n] * len(bin_fields)
if not iterable(accumulation):
accumulation = [accumulation] * len(bin_fields)
- logs = [data_source.pf.field_info[f].take_log for f in bin_fields]
+ logs = [data_source.pf._get_field_info(f).take_log for f in bin_fields]
ex = [data_source.quantities["Extrema"](f, non_zero=l)[0] \
for f, l in zip(bin_fields, logs)]
args = [data_source]
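Since chunks hand back flat arrays with field parameters already managed, bounds filtering in _filter collapses to a boolean AND over each bin field, with no need for the grid's point indices. The logic in isolation:

    import numpy as np

    bounds = [(0.1, 0.9)]                 # (min, max) per bin field
    bin_fields = [np.random.random(1000)]
    mask = np.ones(bin_fields[0].shape, dtype="bool")
    for (mi, ma), data in zip(bounds, bin_fields):
        mask &= (data > mi) & (data < ma)
    filtered = [data[mask] for data in bin_fields]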
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/tests/test_cutting_plane.py
--- a/yt/data_objects/tests/test_cutting_plane.py
+++ b/yt/data_objects/tests/test_cutting_plane.py
@@ -32,6 +32,7 @@
fns += pw.save(name=tmpname)
frb = cut.to_frb((1.0,'unitary'), 64)
for cut_field in ['Ones', 'Density']:
+ fi = pf._get_field_info("unknown", cut_field)
yield assert_equal, frb[cut_field].info['data_source'], \
cut.__str__()
yield assert_equal, frb[cut_field].info['axis'], \
@@ -39,7 +40,7 @@
yield assert_equal, frb[cut_field].info['field'], \
cut_field
yield assert_equal, frb[cut_field].info['units'], \
- pf.field_info[cut_field].get_units()
+ fi.get_units()
yield assert_equal, frb[cut_field].info['xlim'], \
frb.bounds[:2]
yield assert_equal, frb[cut_field].info['ylim'], \
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/tests/test_profiles.py
--- a/yt/data_objects/tests/test_profiles.py
+++ b/yt/data_objects/tests/test_profiles.py
@@ -152,3 +152,25 @@
p3d.add_fields(["Ones"])
yield assert_equal, p3d["Ones"], np.ones((nb,nb,nb))
+def test_particle_profiles():
+ for nproc in [1, 2, 4, 8]:
+ pf = fake_random_pf(32, nprocs=nproc, particles = 32**3)
+ dd = pf.h.all_data()
+
+ p1d = Profile1D(dd, "particle_position_x", 128,
+ 0.0, 1.0, False, weight_field = None)
+ p1d.add_fields(["particle_ones"])
+ yield assert_equal, p1d["particle_ones"].sum(), 32**3
+
+ p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+ "particle_position_y", 128, 0.0, 1.0, False,
+ weight_field = None)
+ p2d.add_fields(["particle_ones"])
+ yield assert_equal, p2d["particle_ones"].sum(), 32**3
+
+ p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
+ "particle_position_y", 128, 0.0, 1.0, False,
+ "particle_position_z", 128, 0.0, 1.0, False,
+ weight_field = None)
+ p3d.add_fields(["particle_ones"])
+ yield assert_equal, p3d["particle_ones"].sum(), 32**3
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/tests/test_projection.py
--- a/yt/data_objects/tests/test_projection.py
+++ b/yt/data_objects/tests/test_projection.py
@@ -46,6 +46,7 @@
fns += pw.save(name=tmpname)
frb = proj.to_frb((1.0,'unitary'), 64)
for proj_field in ['Ones', 'Density']:
+ fi = pf._get_field_info(proj_field)
yield assert_equal, frb[proj_field].info['data_source'], \
proj.__str__()
yield assert_equal, frb[proj_field].info['axis'], \
@@ -53,7 +54,7 @@
yield assert_equal, frb[proj_field].info['field'], \
proj_field
yield assert_equal, frb[proj_field].info['units'], \
- pf.field_info[proj_field].get_units()
+ fi.get_units()
yield assert_equal, frb[proj_field].info['xlim'], \
frb.bounds[:2]
yield assert_equal, frb[proj_field].info['ylim'], \
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/data_objects/tests/test_slice.py
--- a/yt/data_objects/tests/test_slice.py
+++ b/yt/data_objects/tests/test_slice.py
@@ -67,6 +67,7 @@
fns += pw.save(name=tmpname)
frb = slc.to_frb((1.0, 'unitary'), 64)
for slc_field in ['Ones', 'Density']:
+ fi = pf._get_field_info(slc_field)
yield assert_equal, frb[slc_field].info['data_source'], \
slc.__str__()
yield assert_equal, frb[slc_field].info['axis'], \
@@ -74,7 +75,7 @@
yield assert_equal, frb[slc_field].info['field'], \
slc_field
yield assert_equal, frb[slc_field].info['units'], \
- pf.field_info[slc_field].get_units()
+ fi.get_units()
yield assert_equal, frb[slc_field].info['xlim'], \
frb.bounds[:2]
yield assert_equal, frb[slc_field].info['ylim'], \
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/fields/tests/test_fields.py
--- a/yt/fields/tests/test_fields.py
+++ b/yt/fields/tests/test_fields.py
@@ -21,17 +21,24 @@
cp_z_vec = np.array((0.0, 0.0, 1.0)),
)
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
+_base_fields = (("gas", "Density"),
+ ("gas", "x-velocity"),
+ ("gas", "y-velocity"),
+ ("gas", "z-velocity"))
+_base_field_names = [f[1] for f in _base_fields]
def realistic_pf(fields, nprocs):
np.random.seed(int(0x4d3d3d3))
- pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
+ fields = list(set([_strip_ftype(f) for f in fields]))
+ pf = fake_random_pf(16, fields = fields, nprocs = nprocs,
+ particles = 4**3)
pf.parameters["HydroMethod"] = "streaming"
pf.parameters["Gamma"] = 5.0/3.0
pf.parameters["EOSType"] = 1.0
pf.parameters["EOSSoundSpeed"] = 1.0
pf.conversion_factors["Time"] = 1.0
pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
+ pf.gamma = 5.0/3.0
pf.current_redshift = 0.0001
pf.hubble_constant = 0.7
pf.omega_matter = 0.27
@@ -41,6 +48,20 @@
pf.units[unit+'hcm'] = pf.units[unit]
return pf
+def _strip_ftype(field):
+ if not isinstance(field, tuple):
+ return field
+ elif field[0] == "all":
+ return field
+ return field[1]
+
+def _expand_field(field):
+ if isinstance(field, tuple):
+ return field
+ if "particle" in field:
+ return ("all", field)
+ return ("gas", field)
+
class TestFieldAccess(object):
description = None
@@ -52,8 +73,13 @@
def __call__(self):
field = FieldInfo[self.field_name]
+ # Don't test the base fields
+ if field in _base_fields or field in _base_field_names: return
deps = field.get_dependencies()
- fields = list(set(deps.requested + _base_fields))
+ fields = set([])
+ for f in deps.requested + list(_base_fields):
+ fields.add(_expand_field(f))
+ fields = list(fields)
skip_grids = False
needs_spatial = False
for v in field.validators:
@@ -85,7 +111,7 @@
assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
def test_all_fields():
- for field in FieldInfo:
+ for field in sorted(FieldInfo):
if isinstance(field, types.TupleType):
fname = field[0]
else:
@@ -93,13 +119,16 @@
if fname.startswith("CuttingPlane"): continue
if fname.startswith("particle"): continue
if fname.startswith("CIC"): continue
- if field.startswith("BetaPar"): continue
- if field.startswith("TBetaPar"): continue
- if field.startswith("BetaPerp"): continue
+ if fname.startswith("BetaPar"): continue
+ if fname.startswith("TBetaPar"): continue
+ if fname.startswith("BetaPerp"): continue
if fname.startswith("WeakLensingConvergence"): continue
if fname.startswith("DensityPerturbation"): continue
if fname.startswith("Matter_Density"): continue
if fname.startswith("Overdensity"): continue
+ # TotalMass is disabled because of issues with mixed particle/fluid
+ # field detection in current field system.
+ if fname.startswith("TotalMass"): continue
if FieldInfo[field].particle_type: continue
for nproc in [1, 4, 8]:
test_all_fields.__name__ = "%s_%s" % (field, nproc)
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -25,6 +25,8 @@
YTFieldData, \
YTDataContainer, \
YTSelectionContainer
+from yt.data_objects.particle_unions import \
+ ParticleUnion
from yt.data_objects.grid_patch import \
AMRGridPatch
from yt.geometry.geometry_handler import \
@@ -66,7 +68,8 @@
from .fields import \
StreamFieldInfo, \
add_stream_field, \
- KnownStreamFields
+ KnownStreamFields, \
+ _setup_particle_fields
class StreamGrid(AMRGridPatch):
"""
@@ -224,7 +227,12 @@
GridGeometryHandler._setup_classes(self, dd)
def _detect_fields(self):
- self.field_list = list(set(self.stream_handler.get_fields()))
+ # NOTE: Because particle unions add to the actual field list, without
+ # having the keys in the field list itself, we need to double check
+ # here.
+ fl = set(self.stream_handler.get_fields())
+ fl.update(set(getattr(self, "field_list", [])))
+ self.field_list = list(fl)
def _populate_grid_objects(self):
for g in self.grids:
@@ -245,16 +253,14 @@
already in the stream but not part of the data dict will be left
alone.
"""
-
+ [update_field_names(d) for d in data]
particle_types = set_particle_types(data[0])
- ftype = "all"
+ ftype = "io"
for key in data[0].keys() :
if key is "number_of_particles": continue
self.stream_handler.particle_types[key] = particle_types[key]
- if key not in self.field_list:
- self.field_list.append(key)
-
+
for i, grid in enumerate(self.grids) :
if data[i].has_key("number_of_particles") :
grid.NumberOfParticles = data[i].pop("number_of_particles")
@@ -262,10 +268,16 @@
if fname in grid.field_data:
grid.field_data.pop(fname, None)
elif (ftype, fname) in grid.field_data:
- grid.field_data.pop( ("all", fname) )
+ grid.field_data.pop( ("io", fname) )
self.stream_handler.fields[grid.id][fname] = data[i][fname]
+
+ # We only want to create a superset of fields here.
self._detect_fields()
+ mylog.debug("Creating Particle Union 'all'")
+ pu = ParticleUnion("all", list(self.pf.particle_types_raw))
+ self.pf.add_particle_union(pu)
+ self.pf.particle_types = tuple(set(self.pf.particle_types))
self._setup_unknown_fields()
class StreamStaticOutput(StaticOutput):
@@ -323,10 +335,34 @@
def _skip_cache(self):
return True
+ def _setup_particle_type(self, ptype):
+ orig = set(self.field_info.items())
+ _setup_particle_fields(self.field_info, ptype)
+ return [n for n, v in set(self.field_info.items()).difference(orig)]
+
class StreamDictFieldHandler(dict):
+ _additional_fields = ()
@property
- def all_fields(self): return self[0].keys()
+ def all_fields(self):
+ fields = list(self._additional_fields) + self[0].keys()
+ fields = list(set(fields))
+ return fields
+
+def update_field_names(data):
+ orig_names = data.keys()
+ for k in orig_names:
+ if isinstance(k, tuple): continue
+ s = getattr(data[k], "shape", ())
+ if len(s) == 1:
+ field = ("io", k)
+ elif len(s) == 3:
+ field = ("gas", k)
+ elif len(s) == 0:
+ continue
+ else:
+ raise NotImplementedError
+ data[field] = data.pop(k)
def set_particle_types(data) :
@@ -353,7 +389,7 @@
if pf.h.num_grids > 1 :
try:
- x, y, z = (pdata["all","particle_position_%s" % ax] for ax in 'xyz')
+ x, y, z = (pdata["io","particle_position_%s" % ax] for ax in 'xyz')
except KeyError:
raise KeyError("Cannot decompose particle data without position fields!")
@@ -450,9 +486,16 @@
pdata["number_of_particles"] = number_of_particles
for key in data.keys() :
if len(data[key].shape) == 1 :
- pdata[key] = data.pop(key)
+ if not isinstance(key, tuple):
+ field = ("io", key)
+ mylog.debug("Reassigning '%s' to '%s'", key, field)
+ else:
+ field = key
+ sfh._additional_fields += (field,)
+ pdata[field] = data.pop(key)
else :
particle_types = {}
+ update_field_names(data)
if nprocs > 1:
temp = {}
@@ -508,12 +551,11 @@
# Now figure out where the particles go
if number_of_particles > 0 :
- if ("all", "particle_position_x") not in pdata:
+ if ("io", "particle_position_x") not in pdata:
pdata_ftype = {}
for f in [k for k in sorted(pdata)]:
if not hasattr(pdata[f], "shape"): continue
- mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
- pdata_ftype["all",f] = pdata.pop(f)
+ pdata_ftype["io",f] = pdata.pop(f)
pdata_ftype.update(pdata)
pdata = pdata_ftype
assign_particle_data(spf, pdata)
@@ -593,6 +635,7 @@
grid_levels[i,:] = g.pop("level")
if g.has_key("number_of_particles") :
number_of_particles[i,:] = g.pop("number_of_particles")
+ update_field_names(g)
sfh[i] = g
handler = StreamHandler(
@@ -665,7 +708,10 @@
if number_of_particles > 0 :
pdata = {}
for field in base_pf.h.field_list :
- if base_pf.field_info[field].particle_type :
+ if not isinstance(field, tuple):
+ field = ("unknown", field)
+ fi = base_pf._get_field_info(*field)
+ if fi.particle_type :
pdata[field] = np.concatenate([grid[field]
for grid in base_pf.h.grids])
pdata["number_of_particles"] = number_of_particles
@@ -688,7 +734,10 @@
level = g.Level,
dimensions = g.ActiveDimensions )
for field in pf.h.field_list:
- if not pf.field_info[field].particle_type :
+ if not isinstance(field, tuple):
+ field = ("unknown", field)
+ fi = pf._get_field_info(*field)
+ if not fi.particle_type :
gd[field] = g[field]
grid_data.append(gd)
if g.Level < pf.h.max_level: continue
@@ -701,7 +750,10 @@
gd = dict(left_edge = LE, right_edge = grid.right_edge,
level = g.Level + 1, dimensions = dims)
for field in pf.h.field_list:
- if not pf.field_info[field].particle_type :
+ if not isinstance(field, tuple):
+ field = ("unknown", field)
+ fi = pf._get_field_info(*field)
+ if not fi.particle_type :
gd[field] = grid[field]
grid_data.append(gd)
@@ -712,12 +764,12 @@
# Now reassign particle data to grids
if number_of_particles > 0:
- if ("all", "particle_position_x") not in pdata:
+ if ("io", "particle_position_x") not in pdata:
pdata_ftype = {}
for f in [k for k in sorted(pdata)]:
if not hasattr(pdata[f], "shape"): continue
- mylog.debug("Reassigning '%s' to ('all','%s')", f, f)
- pdata_ftype["all",f] = pdata.pop(f)
+ mylog.debug("Reassigning '%s' to ('io','%s')", f, f)
+ pdata_ftype["io",f] = pdata.pop(f)
pdata_ftype.update(pdata)
pdata = pdata_ftype
assign_particle_data(pf, pdata)
@@ -751,17 +803,6 @@
n_ref = 64
over_refine_factor = 1
- def _setup_particle_type(self, ptype):
- orig = set(self.field_info.items())
- particle_vector_functions(ptype,
- ["particle_position_%s" % ax for ax in 'xyz'],
- ["particle_velocity_%s" % ax for ax in 'xyz'],
- self.field_info)
- particle_deposition_functions(ptype,
- "Coordinates", "particle_mass", self.field_info)
- standard_particle_fields(self.field_info, ptype)
- return [n for n, v in set(self.field_info.items()).difference(orig)]
-
def load_particles(data, sim_unit_to_cm, bbox=None,
sim_time=0.0, periodicity=(True, True, True),
n_ref = 64, over_refine_factor = 1):
@@ -819,8 +860,19 @@
sfh = StreamDictFieldHandler()
+ pdata = {}
+ for key in data.keys() :
+ if not isinstance(key, tuple):
+ field = ("io", key)
+ mylog.debug("Reassigning '%s' to '%s'", key, field)
+ else:
+ field = key
+ pdata[field] = data[key]
+ sfh._additional_fields += (field,)
+ data = pdata # Drop reference count
+ update_field_names(data)
particle_types = set_particle_types(data)
-
+
sfh.update({'stream_file':data})
grid_left_edges = domain_left_edge
grid_right_edges = domain_right_edge
@@ -874,7 +926,7 @@
self.oct_handler = oct_handler
self._last_mask = None
self._last_selector_id = None
- self._current_particle_type = 'all'
+ self._current_particle_type = 'io'
self._current_fluid_type = self.pf.default_fluid_type
self.base_region = base_region
self.base_selector = base_region.selector
@@ -953,7 +1005,13 @@
super(StreamOctreeHandler, self)._setup_classes(dd)
def _detect_fields(self):
- self.field_list = list(set(self.stream_handler.get_fields()))
+ # NOTE: Because particle unions add to the actual field list, without
+ # having the keys in the field list itself, we need to double check
+ # here.
+ fl = set(self.stream_handler.get_fields())
+ fl.update(set(getattr(self, "field_list", [])))
+ self.field_list = list(fl)
+
class StreamOctreeStaticOutput(StreamStaticOutput):
_hierarchy_class = StreamOctreeHandler
@@ -1005,6 +1063,7 @@
domain_left_edge = np.array(bbox[:, 0], 'float64')
domain_right_edge = np.array(bbox[:, 1], 'float64')
grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+ update_field_names(data)
sfh = StreamDictFieldHandler()
@@ -1093,7 +1152,13 @@
self.io = io_registry[self.data_style](self.pf)
def _detect_fields(self):
- self.field_list = list(set(self.stream_handler.get_fields()))
+ # NOTE: Because particle unions add to the actual field list, without
+ # having the keys in the field list itself, we need to double check
+ # here.
+ fl = set(self.stream_handler.get_fields())
+ fl.update(set(getattr(self, "field_list", [])))
+ self.field_list = list(fl)
+
class StreamHexahedralStaticOutput(StreamStaticOutput):
_hierarchy_class = StreamHexahedralHierarchy
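The update_field_names helper introduced above applies a simple dimensionality heuristic to untyped keys: a 1-d array is treated as particle data and becomes ("io", name), a 3-d array is fluid data and becomes ("gas", name), and scalars are left alone. For instance, under that convention:

    import numpy as np

    data = {"Density": np.random.random((16, 16, 16)),     # 3-d -> gas
            "particle_position_x": np.random.random(100)}  # 1-d -> io
    # After update_field_names(data), the keys read:
    #   ("gas", "Density"), ("io", "particle_position_x")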
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,37 +34,20 @@
StreamFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
add_field = StreamFieldInfo.add_field
-add_stream_field("density", function = NullFunc)
+add_stream_field("Density", function = NullFunc)
add_stream_field("x-velocity", function = NullFunc)
add_stream_field("y-velocity", function = NullFunc)
add_stream_field("z-velocity", function = NullFunc)
-add_field("Density", function = TranslationFunc("density"))
-
-add_stream_field("particle_position_x", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_y", function = NullFunc, particle_type=True)
-add_stream_field("particle_position_z", function = NullFunc, particle_type=True)
-add_stream_field("particle_index", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_density", function = NullFunc, particle_type=True)
-add_stream_field("particle_gas_temperature", function = NullFunc, particle_type=True)
-add_stream_field("particle_mass", function = NullFunc, particle_type=True)
-
-add_field("ParticleMass", function = TranslationFunc("particle_mass"),
- particle_type=True)
-
-add_stream_field(("all", "particle_position_x"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_y"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_position_z"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_index"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_density"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_gas_temperature"), function = NullFunc, particle_type=True)
-add_stream_field(("all", "particle_mass"), function = NullFunc, particle_type=True)
-
-add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
- particle_type=True)
-
-particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
- ["particle_velocity_%s" % ax for ax in 'xyz'],
- StreamFieldInfo)
-particle_deposition_functions("all", "Coordinates", "ParticleMass",
- StreamFieldInfo)
+def _setup_particle_fields(registry, ptype):
+ for fn in ["creation_time", "dynamical_time", "metallicity_fraction"] + \
+ ["particle_type", "particle_index", "particle_mass"] + \
+ ["particle_position_%s" % ax for ax in 'xyz'] + \
+ ["particle_velocity_%s" % ax for ax in 'xyz']:
+ registry.add_field((ptype, fn), function=NullFunc, particle_type=True)
+ particle_vector_functions(ptype,
+ ["particle_position_%s" % ax for ax in 'xyz'],
+ ["particle_velocity_%s" % ax for ax in 'xyz'],
+ registry)
+ particle_deposition_functions(ptype,
+ "Coordinates", "particle_mass", registry)
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -46,7 +46,7 @@
def _read_fluid_selection(self, chunks, selector, fields, size):
chunks = list(chunks)
- if any((ftype not in ("gas", "deposit") for ftype, fname in fields)):
+ if any((ftype not in ("gas",) for ftype, fname in fields)):
raise NotImplementedError
rv = {}
for field in fields:
@@ -57,63 +57,38 @@
size, [f2 for f1, f2 in fields], ng)
for field in fields:
ftype, fname = field
- if ftype == 'deposit':
- fname = field
ind = 0
for chunk in chunks:
for g in chunk.objs:
- ds = self.fields[g.id][fname]
+ ds = self.fields[g.id][field]
ind += g.select(selector, ds, rv[field], ind) # caches
return rv
- def _read_particle_selection(self, chunks, selector, fields):
+ def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
- if any((ftype != "all" for ftype, fname in fields)):
- raise NotImplementedError
- rv = {}
- # Now we have to do something unpleasant
- mylog.debug("First pass: counting particles.")
- size = 0
- pfields = [("all", "particle_position_%s" % ax) for ax in 'xyz']
for chunk in chunks:
for g in chunk.objs:
if g.NumberOfParticles == 0: continue
gf = self.fields[g.id]
- # Sometimes the stream operator won't have the
- # ("all", "Something") fields, but instead just "Something".
- pns = []
- for pn in pfields:
- if pn in gf: pns.append(pn)
- else: pns.append(pn[1])
- size += g.count_particles(selector,
- gf[pns[0]], gf[pns[1]], gf[pns[2]])
- for field in fields:
- # TODO: figure out dataset types
- rv[field] = np.empty(size, dtype='float64')
- ng = sum(len(c.objs) for c in chunks)
- mylog.debug("Reading %s points of %s fields in %s grids",
- size, [f2 for f1, f2 in fields], ng)
- ind = 0
+ for ptype, field_list in sorted(ptf.items()):
+ x, y, z = (gf[ptype, "particle_position_%s" % ax]
+ for ax in 'xyz')
+ yield ptype, (x, y, z)
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ chunks = list(chunks)
for chunk in chunks:
for g in chunk.objs:
if g.NumberOfParticles == 0: continue
gf = self.fields[g.id]
- pns = []
- for pn in pfields:
- if pn in gf: pns.append(pn)
- else: pns.append(pn[1])
- mask = g.select_particles(selector,
- gf[pns[0]], gf[pns[1]], gf[pns[2]])
- if mask is None: continue
- for field in set(fields):
- if field in gf:
- fn = field
- else:
- fn = field[1]
- gdata = gf[fn][mask]
- rv[field][ind:ind+gdata.size] = gdata
- ind += gdata.size
- return rv
+ for ptype, field_list in sorted(ptf.items()):
+ x, y, z = (gf[ptype, "particle_position_%s" % ax]
+ for ax in 'xyz')
+ mask = selector.select_points(x, y, z)
+ if mask is None: continue
+ for field in field_list:
+ data = np.asarray(gf[ptype, field])
+ yield (ptype, field), data[mask]
@property
def _read_exception(self):
@@ -127,19 +102,8 @@
self.fields = pf.stream_handler.fields
super(StreamParticleIOHandler, self).__init__(pf)
- def _read_particle_selection(self, chunks, selector, fields):
- rv = {}
- # We first need a set of masks for each particle type
- ptf = defaultdict(list)
- psize = defaultdict(lambda: 0)
+ def _read_particle_coords(self, chunks, ptf):
chunks = list(chunks)
- for ftype, fname in fields:
- ptf[ftype].append(fname)
- # For this type of file, we actually have something slightly different.
- # We are given a list of ParticleDataChunks, which is composed of
- # individual ParticleOctreeSubsets. The data_files attribute on these
- # may in fact overlap. So we will iterate over a union of all the
- # data_files.
data_files = set([])
for chunk in chunks:
for obj in chunk.objs:
@@ -148,39 +112,32 @@
f = self.fields[data_file.filename]
# This double-reads
for ptype, field_list in sorted(ptf.items()):
- assert(ptype == "all")
- psize[ptype] += selector.count_points(
- f["particle_position_x"],
- f["particle_position_y"],
- f["particle_position_z"])
- # Now we have all the sizes, and we can allocate
- ind = {}
- for field in fields:
- mylog.debug("Allocating %s values for %s", psize[field[0]], field)
- rv[field] = np.empty(psize[field[0]], dtype="float64")
- ind[field] = 0
+ yield ptype, (f[ptype, "particle_position_x"],
+ f[ptype, "particle_position_y"],
+ f[ptype, "particle_position_z"])
+
+ def _read_particle_fields(self, chunks, ptf, selector):
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
for data_file in data_files:
f = self.fields[data_file.filename]
for ptype, field_list in sorted(ptf.items()):
- assert(ptype == "all")
- mask = selector.select_points(
- f["particle_position_x"],
- f["particle_position_y"],
- f["particle_position_z"])
+ x, y, z = (f[ptype, "particle_position_%s" % ax]
+ for ax in 'xyz')
+ mask = selector.select_points(x, y, z)
if mask is None: continue
for field in field_list:
- data = f[field][mask,...]
- my_ind = ind[ptype, field]
- mylog.debug("Filling from %s to %s with %s",
- my_ind, my_ind+data.shape[0], field)
- rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
- ind[ptype, field] += data.shape[0]
- return rv
+ data = f[ptype, field][mask]
+ yield (ptype, field), data
+
def _initialize_index(self, data_file, regions):
# self.fields[g.id][fname] is the pattern here
pos = np.column_stack(self.fields[data_file.filename][
- "particle_position_%s" % ax] for ax in 'xyz')
+ ("io", "particle_position_%s" % ax)]
+ for ax in 'xyz')
if np.any(pos.min(axis=0) < data_file.pf.domain_left_edge) or \
np.any(pos.max(axis=0) > data_file.pf.domain_right_edge):
raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
@@ -194,11 +151,11 @@
return morton
def _count_particles(self, data_file):
- npart = self.fields[data_file.filename]["particle_position_x"].size
- return {'all': npart}
+ npart = self.fields[data_file.filename]["io", "particle_position_x"].size
+ return {'io': npart}
def _identify_fields(self, data_file):
- return [ ("all", k) for k in self.fields[data_file.filename].keys()]
+ return self.fields[data_file.filename].keys()
class IOHandlerStreamHexahedral(BaseIOHandler):
_data_style = "stream_hexahedral"
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/frontends/stream/tests/test_stream_particles.py
--- a/yt/frontends/stream/tests/test_stream_particles.py
+++ b/yt/frontends/stream/tests/test_stream_particles.py
@@ -28,7 +28,6 @@
# Check that all of this runs ok without particles
- ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0)
ug0 = load_uniform_grid({"Density": dens}, domain_dims, 1.0, nprocs=8)
amr0 = refine_amr(ug0, rc, fo, 3)
@@ -74,16 +73,18 @@
# Check to make sure the fields have been defined correctly
- assert ug1._get_field_info("all", "particle_position_x").particle_type
- assert ug1._get_field_info("all", "particle_position_y").particle_type
- assert ug1._get_field_info("all", "particle_position_z").particle_type
- assert ug1._get_field_info("all", "particle_mass").particle_type
+ for ptype in ("all", "io"):
+ assert ug1._get_field_info(ptype, "particle_position_x").particle_type
+ assert ug1._get_field_info(ptype, "particle_position_y").particle_type
+ assert ug1._get_field_info(ptype, "particle_position_z").particle_type
+ assert ug1._get_field_info(ptype, "particle_mass").particle_type
assert not ug1._get_field_info("gas", "Density").particle_type
- assert ug2._get_field_info("all", "particle_position_x").particle_type
- assert ug2._get_field_info("all", "particle_position_y").particle_type
- assert ug2._get_field_info("all", "particle_position_z").particle_type
- assert ug2._get_field_info("all", "particle_mass").particle_type
+ for ptype in ("all", "io"):
+ assert ug2._get_field_info(ptype, "particle_position_x").particle_type
+ assert ug2._get_field_info(ptype, "particle_position_y").particle_type
+ assert ug2._get_field_info(ptype, "particle_position_z").particle_type
+ assert ug2._get_field_info(ptype, "particle_mass").particle_type
assert not ug2._get_field_info("gas", "Density").particle_type
# Now refine this
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -88,9 +88,11 @@
for n_ref in [16, 32, 64, 512, 1024]:
pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
dd = pf.h.all_data()
- bi = dd["all","mesh_id"]
+ bi = dd["io","mesh_id"]
v = np.bincount(bi.astype("int64"))
yield assert_equal, v.max() <= n_ref, True
+ bi2 = dd["all","mesh_id"]
+ yield assert_equal, bi, bi2
def test_particle_overrefine():
np.random.seed(int(0x4d3d3d3))
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -54,7 +54,8 @@
ValidateParameter, ValidateDataField, ValidateProperty, \
ValidateSpatial, ValidateGridType, \
TimeSeriesData, AnalysisTask, analysis_task, \
- ImageArray, particle_filter, create_profile
+ ImageArray, particle_filter, create_profile, \
+ Profile1D, Profile2D, Profile3D
from yt.data_objects.derived_quantities import \
add_quantity, quantity_info
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/testing.py
--- a/yt/testing.py
+++ b/yt/testing.py
@@ -138,7 +138,7 @@
return left, right, level
def fake_random_pf(ndims, peak_value = 1.0, fields = ("Density",),
- negative = False, nprocs = 1):
+ negative = False, nprocs = 1, particles = 0):
from yt.data_objects.api import data_object_registry
from yt.frontends.stream.api import load_uniform_grid
if not iterable(ndims):
@@ -156,6 +156,13 @@
offsets.append(0.0)
data = dict((field, (np.random.random(ndims) - offset) * peak_value)
for field,offset in zip(fields,offsets))
+ if particles:
+ for f in ('particle_position_%s' % ax for ax in 'xyz'):
+ data[f] = np.random.uniform(size = particles)
+ for f in ('particle_velocity_%s' % ax for ax in 'xyz'):
+ data[f] = np.random.random(size = particles) - 0.5
+ data['particle_mass'] = np.random.random(particles)
+ data['number_of_particles'] = particles
ug = load_uniform_grid(data, ndims, 1.0, nprocs = nprocs)
return ug
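The new particles keyword makes it trivial for tests to get a dataset carrying both fluid and particle data, as the test_particle_profiles additions above do. For example:

    from yt.testing import fake_random_pf

    pf = fake_random_pf(32, nprocs=4, particles=1000)
    dd = pf.h.all_data()
    print(dd["io", "particle_position_x"].size)   # -> 1000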
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/utilities/grid_data_format/writer.py
--- a/yt/utilities/grid_data_format/writer.py
+++ b/yt/utilities/grid_data_format/writer.py
@@ -59,7 +59,9 @@
A dictionary of field parameters to set.
"""
- field_obj = pf.field_info[field_name]
+ if isinstance(field_name, tuple):
+ field_name = field_name[1]
+ field_obj = pf._get_field_info(field_name)
if field_obj.particle_type:
print( "Saving particle fields currently not supported." )
return
@@ -84,6 +86,9 @@
# add field info to field_types group
g = fhandle["field_types"]
# create the subgroup with the field's name
+ if isinstance(field_name, tuple):
+ field_name = field_name[1]
+ fi = pf._get_field_info(field_name)
try:
sg = g.create_group(field_name)
except ValueError:
@@ -91,8 +96,8 @@
sys.exit(1)
# grab the display name and units from the field info container.
- display_name = pf.field_info[field_name].display_name
- units = pf.field_info[field_name].get_units()
+ display_name = fi.display_name
+ units = fi.get_units()
# check that they actually contain something...
if display_name:
@@ -122,9 +127,8 @@
pt_group = particles_group[particle_type_name]
# add the field data to the grid group
# Check if this is a real field or particle data.
- field_obj = pf.field_info[field_name]
grid.get_data(field_name)
- if field_obj.particle_type: # particle data
+ if fi.particle_type: # particle data
pt_group[field_name] = grid[field_name]
else: # a field
grid_group[field_name] = grid[field_name]
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/utilities/particle_generator.py
--- a/yt/utilities/particle_generator.py
+++ b/yt/utilities/particle_generator.py
@@ -5,9 +5,9 @@
class ParticleGenerator(object) :
- default_fields = ["particle_position_x",
- "particle_position_y",
- "particle_position_z"]
+ default_fields = [("io", "particle_position_x"),
+ ("io", "particle_position_y"),
+ ("io", "particle_position_z")]
def __init__(self, pf, num_particles, field_list) :
"""
@@ -20,7 +20,7 @@
self.pf = pf
self.num_particles = num_particles
self.field_list = field_list
- self.field_list.append("particle_index")
+ self.field_list.append(("io", "particle_index"))
try :
self.posx_index = self.field_list.index(self.default_fields[0])
@@ -28,9 +28,8 @@
self.posz_index = self.field_list.index(self.default_fields[2])
except :
raise KeyError("Field list must contain the following fields: " +
- "\'particle_position_x\', \'particle_position_y\'" +
- ", \'particle_position_z\' ")
- self.index_index = self.field_list.index("particle_index")
+ "\n".join(self.default_fields))
+ self.index_index = self.field_list.index(("io", "particle_index"))
self.num_grids = self.pf.h.num_grids
self.NumberOfParticles = np.zeros((self.num_grids), dtype='int64')
@@ -212,9 +211,9 @@
"""
field_list = data.keys()
- x = data.pop("particle_position_x")
- y = data.pop("particle_position_y")
- z = data.pop("particle_position_z")
+ x = data.pop(("io", "particle_position_x"))
+ y = data.pop(("io", "particle_position_y"))
+ z = data.pop(("io", "particle_position_z"))
xcond = np.logical_or(x < pf.domain_left_edge[0],
x >= pf.domain_right_edge[0])
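One caveat, noted in passing: default_fields now holds tuples, so joining them
directly with "\n".join in the KeyError message above would raise a TypeError;
a formulation that handles both strings and tuples might be:

    "\n".join(str(f) for f in self.default_fields)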
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/utilities/tests/test_particle_generator.py
--- a/yt/utilities/tests/test_particle_generator.py
+++ b/yt/utilities/tests/test_particle_generator.py
@@ -23,11 +23,13 @@
# Now generate particles from density
- field_list = ["particle_position_x","particle_position_y",
- "particle_position_z","particle_index",
- "particle_gas_density"]
+ field_list = [("io", "particle_position_x"),
+ ("io", "particle_position_y"),
+ ("io", "particle_position_z"),
+ ("io", "particle_index"),
+ ("io", "particle_gas_density")]
num_particles = 1000000
- field_dict = {"Density": "particle_gas_density"}
+ field_dict = {("gas", "Density"): ("io", "particle_gas_density")}
sphere = pf.h.sphere(pf.domain_center, 0.45)
particles1 = WithDensityParticleGenerator(pf, sphere, num_particles, field_list)
@@ -41,6 +43,8 @@
particles_per_grid1 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
yield assert_equal, particles_per_grid1, particles1.NumberOfParticles
+ tags = np.concatenate([grid["particle_index"] for grid in pf.h.grids])
+ assert(np.unique(tags).size == num_particles)
# Set up a lattice of particles
pdims = np.array([64,64,64])
def new_indices() :
@@ -48,31 +52,33 @@
return np.arange((np.product(pdims)))+num_particles
le = np.array([0.25,0.25,0.25])
re = np.array([0.75,0.75,0.75])
- new_field_list = field_list + ["particle_gas_temperature"]
- new_field_dict = {"Density": "particle_gas_density",
- "Temperature": "particle_gas_temperature"}
+ new_field_list = field_list + [("io", "particle_gas_temperature")]
+ new_field_dict = {("gas", "Density"): ("io", "particle_gas_density"),
+ ("gas", "Temperature"): ("io", "particle_gas_temperature")}
particles2 = LatticeParticleGenerator(pf, pdims, le, re, new_field_list)
particles2.assign_indices(function=new_indices)
particles2.map_grid_fields_to_particles(new_field_dict)
#Test lattice positions
- xpos = np.unique(particles2["particle_position_x"])
- ypos = np.unique(particles2["particle_position_y"])
- zpos = np.unique(particles2["particle_position_z"])
+ xpos = np.unique(particles2["io", "particle_position_x"])
+ ypos = np.unique(particles2["io", "particle_position_y"])
+ zpos = np.unique(particles2["io", "particle_position_z"])
xpred = np.linspace(le[0],re[0],num=pdims[0],endpoint=True)
ypred = np.linspace(le[1],re[1],num=pdims[1],endpoint=True)
zpred = np.linspace(le[2],re[2],num=pdims[2],endpoint=True)
- yield assert_almost_equal, xpos, xpred
- yield assert_almost_equal, ypos, ypred
- yield assert_almost_equal, zpos, zpred
+ assert_almost_equal( xpos, xpred)
+ assert_almost_equal( ypos, ypred)
+ assert_almost_equal( zpos, zpred)
#Test the number of particles again
particles2.apply_to_stream()
particles_per_grid2 = [grid.NumberOfParticles for grid in pf.h.grids]
yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
+
+ [grid.field_data.clear() for grid in pf.h.grids]
particles_per_grid2 = [len(grid["particle_position_x"]) for grid in pf.h.grids]
yield assert_equal, particles_per_grid2, particles1.NumberOfParticles+particles2.NumberOfParticles
diff -r b45ffffaddf3f033823885a0f5589fe42b95f928 -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -119,7 +119,7 @@
Parameters
----------
- data_source : AMR4DData Object
+ data_source : AMR3DData Object
The data object to be profiled, such as all_data, region, or
sphere.
x_field : str
@@ -246,13 +246,18 @@
name = "%s.png" % prefix
suffix = get_image_suffix(name)
prefix = name[:name.rfind(suffix)]
+ xfn = self.profiles[0].x_field
+ if isinstance(xfn, types.TupleType):
+ xfn = xfn[1]
if not suffix:
suffix = ".png"
canvas_cls = get_canvas(name)
for uid, fig in iters:
+ if isinstance(uid, types.TupleType):
+ uid = uid[1]
canvas = canvas_cls(fig)
fn = "%s_1d-Profile_%s_%s%s" % \
- (prefix, self.profiles[0].x_field, uid, suffix)
+ (prefix, xfn, uid, suffix)
mylog.info("Saving %s", fn)
canvas.print_figure(fn)
return self
@@ -410,7 +415,8 @@
def _get_field_log(self, field_y, profile):
pf = profile.data_source.pf
- yfi = pf.field_info[field_y]
+ yf, = profile.data_source._determine_fields([field_y])
+ yfi = pf._get_field_info(*yf)
if self.x_log is None:
x_log = profile.x_log
else:
@@ -425,6 +431,7 @@
def _get_field_label(self, field, field_info):
units = field_info.get_units()
field_name = field_info.display_name
+ if isinstance(field, tuple): field = field[1]
if field_name is None:
field_name = r'$\rm{'+field+r'}$'
elif field_name.find('$') == -1:
@@ -438,8 +445,10 @@
def _get_field_title(self, field_y, profile):
pf = profile.data_source.pf
field_x = profile.x_field
- xfi = pf.field_info[field_x]
- yfi = pf.field_info[field_y]
+ xf, yf = profile.data_source._determine_fields(
+ [field_x, field_y])
+ xfi = pf._get_field_info(*xf)
+ yfi = pf._get_field_info(*yf)
x_title = self.x_title or self._get_field_label(field_x, xfi)
y_title = self.y_title.get(field_y, None) or \
self._get_field_label(field_y, yfi)
@@ -554,9 +563,11 @@
pf = profile.data_source.pf
field_x = profile.x_field
field_y = profile.y_field
- xfi = pf.field_info[field_x]
- yfi = pf.field_info[field_y]
- zfi = pf.field_info[field_z]
+ xf, yf, zf = profile.data_source._determine_fields(
+ [field_x, field_y, field_z])
+ xfi = pf._get_field_info(*xf)
+ yfi = pf._get_field_info(*yf)
+ zfi = pf._get_field_info(*zf)
x_title = self.x_title or self._get_field_label(field_x, xfi)
y_title = self.y_title or self._get_field_label(field_y, yfi)
z_title = self.z_title.get(field_z, None) or \
@@ -566,6 +577,7 @@
def _get_field_label(self, field, field_info):
units = field_info.get_units()
field_name = field_info.display_name
+ if isinstance(field, tuple): field = field[1]
if field_name is None:
field_name = r'$\rm{'+field+r'}$'
elif field_name.find('$') == -1:
@@ -578,7 +590,8 @@
def _get_field_log(self, field_z, profile):
pf = profile.data_source.pf
- zfi = pf.field_info[field_z]
+ zf, = profile.data_source._determine_fields([field_z])
+ zfi = pf._get_field_info(*zf)
if self.x_log is None:
x_log = profile.x_log
else:
@@ -659,9 +672,16 @@
if not self._plot_valid: self._setup_plots()
if mpl_kwargs is None: mpl_kwargs = {}
+ xfn = self.profile.x_field
+ yfn = self.profile.y_field
+ if isinstance(xfn, types.TupleType):
+ xfn = xfn[1]
+ if isinstance(yfn, types.TupleType):
+ yfn = yfn[1]
for f in self.profile.field_data:
- middle = "2d-Profile_%s_%s_%s" % (self.profile.x_field,
- self.profile.y_field, f)
+ _f = f
+ if isinstance(f, types.TupleType): _f = _f[1]
+ middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f)
if name is None:
prefix = self.profile.pf
name = "%s.png" % prefix
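In Python 2, types.TupleType is just an alias for the built-in tuple (the
alias is gone in Python 3), so the checks above are ordinary isinstance tests:

    import types
    assert types.TupleType is tuple
    xfn = ("gas", "Density")
    if isinstance(xfn, types.TupleType):
        xfn = xfn[1]                          # keep only the name for the filename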
https://bitbucket.org/yt_analysis/yt/commits/7554e1e53f6a/
Changeset: 7554e1e53f6a
Branch: yt-3.0
User: jzuhone
Date: 2014-01-01 16:42:40
Summary: Fixing the Reason path in the Windows case
Affected #: 1 file
diff -r 73f539f7930a7f90eadf317f6cb991d977d70ba0 -r 7554e1e53f6a860db14a340a6a425cef8a984e64 setup.py
--- a/setup.py
+++ b/setup.py
@@ -82,13 +82,15 @@
"app/templates",
]
-if os.name == "posix":
- for subdir in REASON_DIRS:
+for subdir in REASON_DIRS:
+ if os.name == "nt":
+ dir_name = "yt\\gui\\reason\\html\\%s\\" % (subdir)
+ else:
dir_name = "yt/gui/reason/html/%s/" % (subdir)
- files = []
- for ext in ["js", "html", "css", "png", "ico", "gif"]:
- files += glob.glob("%s/*.%s" % (dir_name, ext))
- REASON_FILES.append((dir_name, files))
+ files = []
+ for ext in ["js", "html", "css", "png", "ico", "gif"]:
+ files += glob.glob("%s/*.%s" % (dir_name, ext))
+ REASON_FILES.append((dir_name, files))
# Verify that we have Cython installed
try:
https://bitbucket.org/yt_analysis/yt/commits/7eb26e67d052/
Changeset: 7eb26e67d052
Branch: yt-3.0
User: jzuhone
Date: 2014-01-02 02:04:02
Summary: Got halo finder working on Windows
Affected #: 4 files
diff -r 7554e1e53f6a860db14a340a6a425cef8a984e64 -r 7eb26e67d052cc658129c898b08ea7c1c0522d7f yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -2,7 +2,11 @@
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
+#if defined(WIN32) || defined(WIN64)
+#include <windows.h>
+#else
#include <sys/resource.h>
+#endif
#include <assert.h>
#include "kd.h"
#include "tipsydefs.h"
@@ -10,19 +14,41 @@
void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
{
+
+#if defined(WIN32) || defined(WIN64)
+ int secs, usecs;
+ HANDLE hProcess = GetCurrentProcess();
+ FILETIME ftCreation, ftExit, ftKernel, ftUser;
+ SYSTEMTIME stUser;
+ GetProcessTimes(hProcess, &ftCreation, &ftExit,
+ &ftKernel, &ftUser);
+ FileTimeToSystemTime(&ftUser, &stUser);
+ secs = (int)((double)stUser.wHour*3600.0 +
+ (double)stUser.wMinute*60.0 +
+ (double)stUser.wSecond);
+ usecs = (int)((double)stUser.wMilliseconds/1000.0);
+ *puMicro = usecs;
+ *puSecond = secs;
+ if (*puMicro < 0) {
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
+ kd->uSecond = secs;
+ kd->uMicro = usecs;
+#else
struct rusage ru;
getrusage(0,&ru);
*puMicro = ru.ru_utime.tv_usec - kd->uMicro;
*puSecond = ru.ru_utime.tv_sec - kd->uSecond;
if (*puMicro < 0) {
- *puMicro += 1000000;
- *puSecond -= 1;
- }
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
kd->uSecond = ru.ru_utime.tv_sec;
kd->uMicro = ru.ru_utime.tv_usec;
- }
-
+#endif
+}
int kdInitFoF(KDFOF *pkd,int nBucket,float *fPeriod)
{
diff -r 7554e1e53f6a860db14a340a6a425cef8a984e64 -r 7eb26e67d052cc658129c898b08ea7c1c0522d7f yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -13,7 +13,11 @@
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
+#if defined(WIN32) || defined(WIN64)
+#include <windows.h>
+#else
#include <sys/resource.h>
+#endif
#include <assert.h>
#include "kd.h"
#include "hop_numpy.h"
@@ -26,6 +30,28 @@
void kdTime(KD kd,int *puSecond,int *puMicro)
{
+
+#if defined(WIN32) || defined(WIN64)
+ int secs, usecs;
+ HANDLE hProcess = GetCurrentProcess();
+ FILETIME ftCreation, ftExit, ftKernel, ftUser;
+ SYSTEMTIME stUser;
+ GetProcessTimes(hProcess, &ftCreation, &ftExit,
+ &ftKernel, &ftUser);
+ FileTimeToSystemTime(&ftUser, &stUser);
+ secs = (int)((double)stUser.wHour*3600.0 +
+ (double)stUser.wMinute*60.0 +
+ (double)stUser.wSecond);
+ usecs = (int)((double)stUser.wMilliseconds/1000.0);
+ *puMicro = usecs;
+ *puSecond = secs;
+ if (*puMicro < 0) {
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
+ kd->uSecond = secs;
+ kd->uMicro = usecs;
+#else
struct rusage ru;
getrusage(0,&ru);
@@ -37,9 +63,9 @@
}
kd->uSecond = ru.ru_utime.tv_sec;
kd->uMicro = ru.ru_utime.tv_usec;
- }
-
-
+#endif
+}
+
int kdInit(KD *pkd,int nBucket)
{
KD kd;
diff -r 7554e1e53f6a860db14a340a6a425cef8a984e64 -r 7eb26e67d052cc658129c898b08ea7c1c0522d7f yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -10,8 +10,7 @@
config.add_subpackage("absorption_spectrum")
config.add_subpackage("coordinate_transformation")
config.add_subpackage("cosmological_observation")
- if os.name == "posix":
- config.add_subpackage("halo_finding")
+ config.add_subpackage("halo_finding")
config.add_subpackage("halo_mass_function")
config.add_subpackage("halo_merger_tree")
config.add_subpackage("halo_profiler")
diff -r 7554e1e53f6a860db14a340a6a425cef8a984e64 -r 7eb26e67d052cc658129c898b08ea7c1c0522d7f yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -128,9 +128,8 @@
available_analysis_modules = get_available_modules()
# Import our analysis modules
-if os.name == "posix":
- from yt.analysis_modules.halo_finding.api import \
- HaloFinder
+from yt.analysis_modules.halo_finding.api import \
+ HaloFinder
from yt.utilities.definitions import \
axis_names, x_dict, y_dict, inv_axis_names
https://bitbucket.org/yt_analysis/yt/commits/dfbf668989de/
Changeset: dfbf668989de
Branch: yt-3.0
User: jzuhone
Date: 2014-01-02 02:19:31
Summary: Didn't need this here
Affected #: 1 file
diff -r 7eb26e67d052cc658129c898b08ea7c1c0522d7f -r dfbf668989de95d06c0f1b642e7cd1d1d9d27111 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -16,7 +16,6 @@
import base64
import types
-import os
from collections import defaultdict
from functools import wraps
https://bitbucket.org/yt_analysis/yt/commits/400e3b9a4f44/
Changeset: 400e3b9a4f44
Branch: yt-3.0
User: jzuhone
Date: 2014-01-02 03:48:17
Summary: Should fix the test failures
Affected #: 1 file
diff -r dfbf668989de95d06c0f1b642e7cd1d1d9d27111 -r 400e3b9a4f44815aabec53b79a4fccbe853e57aa yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -211,9 +211,9 @@
y_vals = data_object[self.y_name].ravel().astype('float64')
z_vals = data_object[self.z_name].ravel().astype('float64')
- x_i = np.digitize(x_vals, self.x_bins).astype("int32") - 1
- y_i = np.digitize(y_vals, self.y_bins).astype("int32") - 1
- z_i = np.digitize(z_vals, self.z_bins).astype("int32") - 1
+ x_i = np.digitize(x_vals, self.x_bins).astype("int") - 1
+ y_i = np.digitize(y_vals, self.y_bins).astype("int") - 1
+ z_i = np.digitize(z_vals, self.z_bins).astype("int") - 1
if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
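Presumably the failures traced back to integer width: numpy's "int" maps to
the platform's native C long, which is 64-bit on most POSIX systems but 32-bit
on Windows, so casting to it keeps the bin indices consistent with what the
platform expects. A quick check:

    import numpy as np
    print np.dtype("int")    # int64 on 64-bit Linux/OS X, int32 on Windows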
https://bitbucket.org/yt_analysis/yt/commits/c7953a98dac9/
Changeset: c7953a98dac9
Branch: yt-3.0
User: jzuhone
Date: 2014-01-02 03:57:29
Summary: Don't need this anymore
Affected #: 1 file
diff -r 400e3b9a4f44815aabec53b79a4fccbe853e57aa -r c7953a98dac9d8cb8f5abc141d5aa0e98947e0bb yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python
import setuptools
-import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
https://bitbucket.org/yt_analysis/yt/commits/a558574527bd/
Changeset: a558574527bd
Branch: yt-3.0
User: jzuhone
Date: 2014-01-07 16:11:35
Summary: Implementing Matt's suggestions
Affected #: 3 files
diff -r c7953a98dac9d8cb8f5abc141d5aa0e98947e0bb -r a558574527bd97f8feff860c75cc7f515fd564c0 setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,10 +83,8 @@
]
for subdir in REASON_DIRS:
- if os.name == "nt":
- dir_name = "yt\\gui\\reason\\html\\%s\\" % (subdir)
- else:
- dir_name = "yt/gui/reason/html/%s/" % (subdir)
+ dir_name = os.path.join("yt", "gui", "reason", "html", subdir)
+ print dir_name
files = []
for ext in ["js", "html", "css", "png", "ico", "gif"]:
files += glob.glob("%s/*.%s" % (dir_name, ext))
diff -r c7953a98dac9d8cb8f5abc141d5aa0e98947e0bb -r a558574527bd97f8feff860c75cc7f515fd564c0 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -71,7 +71,7 @@
def add_grids(self, grids):
gles = np.array([g.LeftEdge for g in grids])
gres = np.array([g.RightEdge for g in grids])
- gids = np.array([g.id for g in grids]).astype("int64")
+ gids = np.array([g.id for g in grids], dtype="int64")
add_pygrids(self.trunk, gids.size, gles, gres, gids,
self.comm_rank, self.comm_size)
del gles, gres, gids, grids
diff -r c7953a98dac9d8cb8f5abc141d5aa0e98947e0bb -r a558574527bd97f8feff860c75cc7f515fd564c0 yt/utilities/png_writer.py
--- a/yt/utilities/png_writer.py
+++ b/yt/utilities/png_writer.py
@@ -11,7 +11,7 @@
#-----------------------------------------------------------------------------
import matplotlib._png as _png
-import StringIO
+import cStringIO
def write_png(buffer, filename, dpi=100):
width = buffer.shape[1]
@@ -21,7 +21,7 @@
def write_png_to_string(buffer, dpi=100, gray=0):
width = buffer.shape[1]
height = buffer.shape[0]
- fileobj = StringIO.StringIO()
+ fileobj = cStringIO.StringIO()
_png.write_png(buffer, width, height, fileobj, dpi)
png_str = fileobj.getvalue()
fileobj.close()
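cStringIO is the C-accelerated, API-compatible counterpart of StringIO in
Python 2; the in-memory PNG buffer above follows the usual pattern:

    import cStringIO
    fileobj = cStringIO.StringIO()
    fileobj.write("...png bytes...")
    png_str = fileobj.getvalue()
    fileobj.close()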
https://bitbucket.org/yt_analysis/yt/commits/bc141a60ea45/
Changeset: bc141a60ea45
Branch: yt-3.0
User: jzuhone
Date: 2014-01-07 17:52:08
Summary: Removing print statement
Affected #: 1 file
diff -r a558574527bd97f8feff860c75cc7f515fd564c0 -r bc141a60ea4580284d1c518d5621a13f4d7d5ae7 setup.py
--- a/setup.py
+++ b/setup.py
@@ -84,7 +84,6 @@
for subdir in REASON_DIRS:
dir_name = os.path.join("yt", "gui", "reason", "html", subdir)
- print dir_name
files = []
for ext in ["js", "html", "css", "png", "ico", "gif"]:
files += glob.glob("%s/*.%s" % (dir_name, ext))
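os.path.join selects the separator for the running platform, which is what
made the earlier os.name == "nt" branch unnecessary:

    import os
    print os.path.join("yt", "gui", "reason", "html", "app")
    # yt/gui/reason/html/app on POSIX, yt\gui\reason\html\app on Windows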
https://bitbucket.org/yt_analysis/yt/commits/b14aeeb32c24/
Changeset: b14aeeb32c24
Branch: yt-3.0
User: MatthewTurk
Date: 2014-01-07 19:48:01
Summary: Merged in jzuhone/yt-windows/yt-3.0 (pull request #682)
yt compatibility with Windows
Affected #: 26 files
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 setup.py
--- a/setup.py
+++ b/setup.py
@@ -83,7 +83,7 @@
]
for subdir in REASON_DIRS:
- dir_name = "yt/gui/reason/html/%s/" % (subdir)
+ dir_name = os.path.join("yt", "gui", "reason", "html", subdir)
files = []
for ext in ["js", "html", "css", "png", "ico", "gif"]:
files += glob.glob("%s/*.%s" % (dir_name, ext))
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/analysis_modules/halo_finding/fof/kd.c
--- a/yt/analysis_modules/halo_finding/fof/kd.c
+++ b/yt/analysis_modules/halo_finding/fof/kd.c
@@ -2,7 +2,11 @@
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
+#if defined(WIN32) || defined(WIN64)
+#include <windows.h>
+#else
#include <sys/resource.h>
+#endif
#include <assert.h>
#include "kd.h"
#include "tipsydefs.h"
@@ -10,19 +14,41 @@
void kdTimeFoF(KDFOF kd,int *puSecond,int *puMicro)
{
+
+#if defined(WIN32) || defined(WIN64)
+ int secs, usecs;
+ HANDLE hProcess = GetCurrentProcess();
+ FILETIME ftCreation, ftExit, ftKernel, ftUser;
+ SYSTEMTIME stUser;
+ GetProcessTimes(hProcess, &ftCreation, &ftExit,
+ &ftKernel, &ftUser);
+ FileTimeToSystemTime(&ftUser, &stUser);
+ secs = (int)((double)stUser.wHour*3600.0 +
+ (double)stUser.wMinute*60.0 +
+ (double)stUser.wSecond);
+ usecs = (int)((double)stUser.wMilliseconds/1000.0);
+ *puMicro = usecs;
+ *puSecond = secs;
+ if (*puMicro < 0) {
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
+ kd->uSecond = secs;
+ kd->uMicro = usecs;
+#else
struct rusage ru;
getrusage(0,&ru);
*puMicro = ru.ru_utime.tv_usec - kd->uMicro;
*puSecond = ru.ru_utime.tv_sec - kd->uSecond;
if (*puMicro < 0) {
- *puMicro += 1000000;
- *puSecond -= 1;
- }
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
kd->uSecond = ru.ru_utime.tv_sec;
kd->uMicro = ru.ru_utime.tv_usec;
- }
-
+#endif
+}
int kdInitFoF(KDFOF *pkd,int nBucket,float *fPeriod)
{
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/analysis_modules/halo_finding/hop/hop_kd.c
--- a/yt/analysis_modules/halo_finding/hop/hop_kd.c
+++ b/yt/analysis_modules/halo_finding/hop/hop_kd.c
@@ -13,7 +13,11 @@
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
+#if defined(WIN32) || defined(WIN64)
+#include <windows.h>
+#else
#include <sys/resource.h>
+#endif
#include <assert.h>
#include "kd.h"
#include "hop_numpy.h"
@@ -26,6 +30,28 @@
void kdTime(KD kd,int *puSecond,int *puMicro)
{
+
+#if defined(WIN32) || defined(WIN64)
+ int secs, usecs;
+ HANDLE hProcess = GetCurrentProcess();
+ FILETIME ftCreation, ftExit, ftKernel, ftUser;
+ SYSTEMTIME stUser;
+ GetProcessTimes(hProcess, &ftCreation, &ftExit,
+ &ftKernel, &ftUser);
+ FileTimeToSystemTime(&ftUser, &stUser);
+ secs = (int)((double)stUser.wHour*3600.0 +
+ (double)stUser.wMinute*60.0 +
+ (double)stUser.wSecond);
+ usecs = (int)((double)stUser.wMilliseconds/1000.0);
+ *puMicro = usecs;
+ *puSecond = secs;
+ if (*puMicro < 0) {
+ *puMicro += 1000000;
+ *puSecond -= 1;
+ }
+ kd->uSecond = secs;
+ kd->uMicro = usecs;
+#else
struct rusage ru;
getrusage(0,&ru);
@@ -37,9 +63,9 @@
}
kd->uSecond = ru.ru_utime.tv_sec;
kd->uMicro = ru.ru_utime.tv_usec;
- }
-
-
+#endif
+}
+
int kdInit(KD *pkd,int nBucket)
{
KD kd;
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python
import setuptools
-
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('analysis_modules', parent_package, top_path)
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -22,7 +22,7 @@
from yt.utilities.lib import \
VoxelTraversal, planar_points_in_volume, find_grids_in_inclined_box, \
grid_points_in_volume
-from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace
+from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace
from yt.utilities.orientation import Orientation
from .data_containers import \
YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -18,8 +18,12 @@
from libc.string cimport memcpy
import data_structures
-cdef extern from "alloca.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef extern from "artio.h":
ctypedef struct artio_fileset_handle "artio_fileset" :
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -23,8 +23,12 @@
from fp_utils cimport *
from .oct_container cimport Oct, OctAllocationContainer, OctreeContainer
-cdef extern from "alloca.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef inline int gind(int i, int j, int k, int dims[3]):
# The ordering is such that we want i to vary the slowest in this instance,
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/geometry/particle_smooth.pxd
--- a/yt/geometry/particle_smooth.pxd
+++ b/yt/geometry/particle_smooth.pxd
@@ -24,8 +24,12 @@
from oct_container cimport Oct, OctAllocationContainer, OctreeContainer
from .particle_deposit cimport sph_kernel, gind
-cdef extern from "alloca.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef struct NeighborList
cdef struct NeighborList:
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/gui/reason/pannable_map.py
--- a/yt/gui/reason/pannable_map.py
+++ b/yt/gui/reason/pannable_map.py
@@ -19,7 +19,8 @@
from yt.visualization.image_writer import apply_colormap
from yt.visualization.fixed_resolution import FixedResolutionBuffer
-from yt.utilities.lib import write_png_to_string, get_color_bounds
+from yt.utilities.lib import get_color_bounds
+from yt.utilities.png_writer import write_png_to_string
import yt.extern.bottle as bottle
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -60,7 +60,7 @@
mylog.debug("SIGUSR1 registered for traceback printing")
signal.signal(signal.SIGUSR2, signal_ipython)
mylog.debug("SIGUSR2 registered for IPython Insertion")
-except ValueError: # Not in main thread
+except (ValueError, AttributeError) as e: # Not in main thread
pass
class SetExceptionHandling(argparse.Action):
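SIGUSR1 and SIGUSR2 do not exist on Windows, so merely referencing
signal.SIGUSR1 raises AttributeError there; the widened except clause covers
that case alongside the existing not-in-main-thread ValueError. The pattern in
isolation (signal_ipython is the handler from the hunk above):

    import signal
    try:
        signal.signal(signal.SIGUSR2, signal_ipython)
    except (ValueError, AttributeError):
        pass   # ValueError: not in the main thread; AttributeError: no SIGUSR2 on Windows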
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -71,7 +71,7 @@
def add_grids(self, grids):
gles = np.array([g.LeftEdge for g in grids])
gres = np.array([g.RightEdge for g in grids])
- gids = np.array([g.id for g in grids])
+ gids = np.array([g.id for g in grids], dtype="int64")
add_pygrids(self.trunk, gids.size, gles, gres, gids,
self.comm_rank, self.comm_size)
del gles, gres, gids, grids
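Passing dtype at construction builds the int64 array in one step, whereas
.astype first creates an array of the platform default integer (int32 on
Windows) and then makes a converting copy:

    import numpy as np
    ids = [1, 2, 3]
    a = np.array(ids).astype("int64")   # default-int array, then a copy
    b = np.array(ids, dtype="int64")    # int64 from the start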
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/__init__.py
--- a/yt/utilities/lib/__init__.py
+++ b/yt/utilities/lib/__init__.py
@@ -1,3 +1,4 @@
+import os
"""
Compatibility module
@@ -20,7 +21,7 @@
from .Interpolators import *
from .misc_utilities import *
from .Octree import *
-from .png_writer import *
+from .image_utilities import *
from .PointsInVolume import *
from .QuadTree import *
from .RayIntegrators import *
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/alt_ray_tracers.pyx
--- a/yt/utilities/lib/alt_ray_tracers.pyx
+++ b/yt/utilities/lib/alt_ray_tracers.pyx
@@ -65,7 +65,7 @@
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
-def clyindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1,
+def cylindrical_ray_trace(np.ndarray[np.float64_t, ndim=1] p1,
np.ndarray[np.float64_t, ndim=1] p2,
np.ndarray[np.float64_t, ndim=2] left_edges,
np.ndarray[np.float64_t, ndim=2] right_edges):
@@ -152,7 +152,7 @@
np.argwhere(tmmright).flat,
np.argwhere(tpmright).flat,]))
if 0 == inds.shape[0]:
- inds = np.arange(I)
+ inds = np.arange(np.int64(I))
thetaleft = np.empty(I)
thetaleft.fill(p1[2])
thetaright = np.empty(I)
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/fortran_reader.pyx
--- a/yt/utilities/lib/fortran_reader.pyx
+++ b/yt/utilities/lib/fortran_reader.pyx
@@ -29,8 +29,12 @@
void FIX_LONG( unsigned )
void FIX_FLOAT( float )
-cdef extern from "alloca.h":
- void *alloca(int)
+IF UNAME_SYSNAME == "Windows":
+ cdef extern from "malloc.h":
+ void *alloca(int)
+ELSE:
+ cdef extern from "alloca.h":
+ void *alloca(int)
cdef extern from "stdio.h":
cdef int SEEK_SET
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/image_utilities.pyx
--- /dev/null
+++ b/yt/utilities/lib/image_utilities.pyx
@@ -0,0 +1,39 @@
+"""
+Utilities for images
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+cimport numpy as np
+cimport cython
+from fp_utils cimport iclip
+
+def add_points_to_image(
+ np.ndarray[np.uint8_t, ndim=3] buffer,
+ np.ndarray[np.float64_t, ndim=1] px,
+ np.ndarray[np.float64_t, ndim=1] py,
+ np.float64_t pv):
+ cdef int i, j, k, pi
+ cdef int np = px.shape[0]
+ cdef int xs = buffer.shape[0]
+ cdef int ys = buffer.shape[1]
+ cdef int v
+ v = iclip(<int>(pv * 255), 0, 255)
+ for pi in range(np):
+ j = <int> (xs * px[pi])
+ i = <int> (ys * py[pi])
+ for k in range(3):
+ buffer[i, j, k] = 0
+ return
+ #for i in range(xs):
+ # for j in range(ys):
+ # for k in range(3):
+ # v = buffer[i, j, k]
+ # buffer[i, j, k] = iclip(v, 0, 255)
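A minimal usage sketch for the new helper (it assumes a 3-channel uint8 buffer
and positions normalized to [0, 1); as written, the loop paints each
particle's pixel black regardless of pv):

    import numpy as np
    from yt.utilities.lib.image_utilities import add_points_to_image
    buf = np.full((512, 512, 3), 255, dtype="uint8")
    px = np.random.random(1000)
    py = np.random.random(1000)
    add_points_to_image(buf, px, py, 1.0)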
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/png_writer.pyx
--- a/yt/utilities/lib/png_writer.pyx
+++ /dev/null
@@ -1,317 +0,0 @@
-"""
-A light interface to libpng
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-cimport numpy as np
-cimport cython
-from libc.stdlib cimport malloc, realloc, free
-from libc.string cimport memcpy
-from cpython.string cimport PyString_FromStringAndSize
-from fp_utils cimport imax, fmax, imin, fmin, iclip, fclip
-
-from libc.stdio cimport fopen, fclose, FILE
-
-cdef extern from "stdlib.h":
- # NOTE that size_t might not be int
- void *alloca(int)
-
-# Idiom for accessing Python files, from Cython FAQ
-# First, declare the Python macro to access files:
-cdef extern from "Python.h":
- ctypedef struct FILE
- FILE* PyFile_AsFile(object)
- void fprintf(FILE* f, char* s, char* s)
-
-cdef extern from "png.h":
- ctypedef unsigned long png_uint_32
- ctypedef long png_int_32
- ctypedef unsigned short png_uint_16
- ctypedef short png_int_16
- ctypedef unsigned char png_byte
- ctypedef void *png_voidp
- ctypedef png_byte *png_bytep
- ctypedef png_uint_32 *png_uint_32p
- ctypedef png_int_32 *png_int_32p
- ctypedef png_uint_16 *png_uint_16p
- ctypedef png_int_16 *png_int_16p
- ctypedef char *png_charp
- ctypedef char *png_const_charp
- ctypedef FILE *png_FILE_p
-
- ctypedef struct png_struct:
- png_voidp io_ptr
- ctypedef png_struct *png_structp
-
- ctypedef struct png_info:
- pass
- ctypedef png_info *png_infop
-
- ctypedef struct png_color_8:
- png_byte red
- png_byte green
- png_byte blue
- png_byte gray
- png_byte alpha
- ctypedef png_color_8 *png_color_8p
-
- cdef png_const_charp PNG_LIBPNG_VER_STRING
-
- # Note that we don't support error or warning functions
- png_structp png_create_write_struct(
- png_const_charp user_png_ver, png_voidp error_ptr,
- void *error_fn, void *warn_fn)
-
- png_infop png_create_info_struct(png_structp png_ptr)
-
- void png_init_io(png_structp png_ptr, png_FILE_p fp)
-
- void png_set_IHDR(png_structp png_ptr, png_infop info_ptr,
- png_uint_32 width, png_uint_32 height, int bit_depth,
- int color_type, int interlace_method, int compression_method,
- int filter_method)
-
- cdef int PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE
- cdef int PNG_COLOR_TYPE_GRAY, PNG_INTERLACE_ADAM7
- cdef int PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE
-
- void png_set_pHYs(png_structp png_ptr, png_infop info_ptr,
- png_uint_32 res_x, png_uint_32 res_y, int unit_type)
-
- cdef int PNG_RESOLUTION_METER
-
- void png_set_sBIT(png_structp png_ptr, png_infop info_ptr,
- png_color_8p sig_bit)
-
- ctypedef void (*png_rw_ptr) (png_structp, png_bytep, size_t)
- ctypedef void (*png_flush_ptr) (png_structp)
- void png_set_write_fn(png_structp png_ptr, png_voidp io_ptr,
- png_rw_ptr write_data_fn,
- png_flush_ptr output_flush_fn)
- png_voidp png_get_io_ptr (png_structp png_ptr)
-
- void png_write_info(png_structp png_ptr, png_infop info_ptr)
- void png_set_rows(png_structp png_ptr, png_infop info_ptr,
- png_bytep *row_pointers)
- void png_write_image(png_structp png_ptr, png_bytep *image)
- void png_write_end(png_structp png_ptr, png_infop info_ptr)
- void png_write_png(png_structp png_ptr, png_infop info_ptr,
- int transforms, png_voidp params)
- cdef int PNG_TRANSFORM_IDENTITY
-
- void png_destroy_write_struct(
- png_structp *png_ptr_ptr, png_infop *info_ptr_ptr)
-
-def write_png_to_file(np.ndarray[np.uint8_t, ndim=3] buffer,
- object py_fileobj, int dpi=100,
- int close = 0):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
- cdef FILE *fileobj = PyFile_AsFile(py_fileobj)
-
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- png_init_io(png_ptr, fileobj)
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- png_write_info(png_ptr, info_ptr)
- png_write_image(png_ptr, row_pointers)
- png_write_end(png_ptr, info_ptr)
-
- if close == 1: fclose(fileobj)
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
-def write_png(np.ndarray[np.uint8_t, ndim=3] buffer,
- char *filename, int dpi=100):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
-
- cdef FILE* fileobj = fopen(filename, "wb")
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- png_init_io(png_ptr, fileobj)
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- PNG_COLOR_TYPE_RGB_ALPHA, PNG_INTERLACE_NONE,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- png_write_info(png_ptr, info_ptr)
- png_write_image(png_ptr, row_pointers)
- png_write_end(png_ptr, info_ptr)
-
- fclose(fileobj)
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
-
-# Much of this is inspired by and translated from this StackOverflow question:
-# http://stackoverflow.com/questions/1821806/how-to-encode-png-to-buffer-using-libpng
-
-cdef public struct mem_encode:
- char *buffer
- size_t size
-
-cdef public void my_png_write_data(png_structp png_ptr, png_bytep data,
- size_t length):
- cdef png_voidp temp = png_get_io_ptr(png_ptr)
- cdef mem_encode *p = <mem_encode *> temp
- cdef size_t nsize = p.size + length
- if p.buffer != NULL:
- p.buffer = <char *> realloc(p.buffer, nsize)
- else:
- p.buffer = <char *> malloc(nsize)
- memcpy(p.buffer + p.size, data, length)
- p.size += length
-
-cdef public void my_png_flush(png_structp png_ptr):
- return
-
-def write_png_to_string(np.ndarray[np.uint8_t, ndim=3] buffer, int dpi=100,
- int gray = 0):
-
- # This is something of a translation of the matplotlib _png module
- cdef png_byte *pix_buffer = <png_byte *> buffer.data
- cdef int width = buffer.shape[1]
- cdef int height = buffer.shape[0]
-
- cdef png_bytep *row_pointers
- cdef png_structp png_ptr
- cdef png_infop info_ptr
-
- cdef png_color_8 sig_bit
- cdef png_uint_32 row
-
- row_pointers = <png_bytep *> alloca(sizeof(png_bytep) * height)
-
- for row in range(height):
- row_pointers[row] = pix_buffer + row * width * 4
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL)
- info_ptr = png_create_info_struct(png_ptr)
-
- # Um we are ignoring setjmp sorry guys
-
- cdef int im_type, interlace_type
- interlace_type = PNG_INTERLACE_NONE
- if gray == 0:
- im_type = PNG_COLOR_TYPE_RGB_ALPHA
- sig_bit.gray = 0
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 8
- elif gray == 1:
- im_type = PNG_COLOR_TYPE_GRAY
- sig_bit.gray = 8
- sig_bit.red = sig_bit.green = sig_bit.blue = sig_bit.alpha = 0
- else:
- raise RuntimeError
-
- png_set_IHDR(png_ptr, info_ptr, width, height, 8,
- im_type, interlace_type,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE)
-
- cdef size_t dots_per_meter = <size_t> (dpi / (2.54 / 100.0))
- png_set_pHYs(png_ptr, info_ptr, dots_per_meter, dots_per_meter,
- PNG_RESOLUTION_METER)
-
-
- png_set_sBIT(png_ptr, info_ptr, &sig_bit)
-
- cdef mem_encode state
- state.buffer = NULL
- state.size = 0
-
- png_set_write_fn(png_ptr, <png_voidp> &state, my_png_write_data, NULL)
- png_set_rows(png_ptr, info_ptr, row_pointers)
- png_write_png(png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL)
-
- png_destroy_write_struct(&png_ptr, &info_ptr)
-
- pp = PyString_FromStringAndSize(state.buffer, state.size)
- if state.buffer != NULL: free(state.buffer)
- return pp
-
-def add_points_to_image(
- np.ndarray[np.uint8_t, ndim=3] buffer,
- np.ndarray[np.float64_t, ndim=1] px,
- np.ndarray[np.float64_t, ndim=1] py,
- np.float64_t pv):
- cdef int i, j, k, pi
- cdef int np = px.shape[0]
- cdef int xs = buffer.shape[0]
- cdef int ys = buffer.shape[1]
- cdef int v
- v = iclip(<int>(pv * 255), 0, 255)
- for pi in range(np):
- j = <int> (xs * px[pi])
- i = <int> (ys * py[pi])
- for k in range(3):
- buffer[i, j, k] = 0
- return
- #for i in range(xs):
- # for j in range(ys):
- # for k in range(3):
- # v = buffer[i, j, k]
- # buffer[i, j, k] = iclip(v, 0, 255)
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/setup.py
--- a/yt/utilities/lib/setup.py
+++ b/yt/utilities/lib/setup.py
@@ -5,17 +5,14 @@
from yt.utilities.setup import \
check_for_dependencies
-
-def check_for_png():
- return check_for_dependencies("PNG_DIR", "png.cfg", "png.h", "png")
-
-
def check_for_openmp():
# Create a temporary directory
tmpdir = tempfile.mkdtemp()
curdir = os.getcwd()
exit_code = 1
+ if os.name == 'nt': return False
+
try:
os.chdir(tmpdir)
@@ -49,7 +46,6 @@
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('lib',parent_package,top_path)
- png_inc, png_lib = check_for_png()
if check_for_openmp() == True:
omp_args = ['-fopenmp']
else:
@@ -101,13 +97,10 @@
config.add_extension("Octree",
["yt/utilities/lib/Octree.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
- config.add_extension("png_writer",
- ["yt/utilities/lib/png_writer.pyx"],
- define_macros=[("PNG_SETJMP_NOT_SUPPORTED", True)],
- include_dirs=[png_inc],
- library_dirs=[png_lib],
- libraries=["m", "png"],
- depends=["yt/utilities/lib/fp_utils.pxd"]),
+ config.add_extension("image_utilities",
+ ["yt/utilities/lib/image_utilities.pyx"],
+ libraries=["m"],
+ depends=["yt/utilities/lib/fp_utils.pxd"]),
config.add_extension("PointsInVolume",
["yt/utilities/lib/PointsInVolume.pyx"],
libraries=["m"], depends=["yt/utilities/lib/fp_utils.pxd"])
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/lib/tests/test_alt_ray_tracers.py
--- a/yt/utilities/lib/tests/test_alt_ray_tracers.py
+++ b/yt/utilities/lib/tests/test_alt_ray_tracers.py
@@ -8,7 +8,7 @@
from numpy.testing import assert_array_equal, assert_array_almost_equal
from yt.testing import amrspace
-from yt.utilities.lib.alt_ray_tracers import clyindrical_ray_trace, _cyl2cart
+from yt.utilities.lib.alt_ray_tracers import cylindrical_ray_trace, _cyl2cart
left_grid = right_grid = amr_levels = center_grid = data = None
@@ -57,13 +57,13 @@
assert_true(np.all(bupper >= arr))
-def test_clyindrical_ray_trace():
+def test_cylindrical_ray_trace():
for pair in point_pairs:
p1, p2 = pair
p1cart, p2cart = _cyl2cart(pair)
pathlen = np.sqrt(np.sum((p2cart - p1cart)**2))
- t, s, rztheta, inds = clyindrical_ray_trace(p1, p2, left_grid, right_grid)
+ t, s, rztheta, inds = cylindrical_ray_trace(p1, p2, left_grid, right_grid)
npoints = len(t)
yield check_monotonic_inc, t
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/linear_interpolators.py
--- a/yt/utilities/linear_interpolators.py
+++ b/yt/utilities/linear_interpolators.py
@@ -211,9 +211,9 @@
y_vals = data_object[self.y_name].ravel().astype('float64')
z_vals = data_object[self.z_name].ravel().astype('float64')
- x_i = np.digitize(x_vals, self.x_bins) - 1
- y_i = np.digitize(y_vals, self.y_bins) - 1
- z_i = np.digitize(z_vals, self.z_bins) - 1
+ x_i = np.digitize(x_vals, self.x_bins).astype("int") - 1
+ y_i = np.digitize(y_vals, self.y_bins).astype("int") - 1
+ z_i = np.digitize(z_vals, self.z_bins).astype("int") - 1
if np.any((x_i == -1) | (x_i == len(self.x_bins)-1)) \
or np.any((y_i == -1) | (y_i == len(self.y_bins)-1)) \
or np.any((z_i == -1) | (z_i == len(self.z_bins)-1)):
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/png_writer.py
--- /dev/null
+++ b/yt/utilities/png_writer.py
@@ -0,0 +1,29 @@
+"""
+Writing PNGs
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import matplotlib._png as _png
+import cStringIO
+
+def write_png(buffer, filename, dpi=100):
+ width = buffer.shape[1]
+ height = buffer.shape[0]
+ _png.write_png(buffer, width, height, filename, dpi)
+
+def write_png_to_string(buffer, dpi=100, gray=0):
+ width = buffer.shape[1]
+ height = buffer.shape[0]
+ fileobj = cStringIO.StringIO()
+ _png.write_png(buffer, width, height, fileobj, dpi)
+ png_str = fileobj.getvalue()
+ fileobj.close()
+ return png_str
+
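A minimal usage sketch for the new pure-Python module (assumes an RGBA uint8
buffer, the layout yt's image writers produce):

    import numpy as np
    from yt.utilities.png_writer import write_png, write_png_to_string
    img = np.zeros((64, 64, 4), dtype="uint8")
    write_png(img, "blank.png")
    png_str = write_png_to_string(img)   # raw PNG bytes for the web frontends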
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/utilities/setup.py
--- a/yt/utilities/setup.py
+++ b/yt/utilities/setup.py
@@ -131,7 +131,7 @@
elif os.path.exists(cfg):
return get_location_from_cfg(cfg)
# Now we see if ctypes can help us
- if os.name == 'posix':
+ if os.name == 'posix' or os.name == 'nt':
target_inc, target_lib = get_location_from_ctypes(header, library)
if None not in (target_inc, target_lib):
print(
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/visualization/image_panner/vm_panner.py
--- a/yt/visualization/image_panner/vm_panner.py
+++ b/yt/visualization/image_panner/vm_panner.py
@@ -287,7 +287,7 @@
self.transport = transport
def __call__(self, val):
- from yt.utilities.lib import write_png_to_string
+ from yt.utilities.png_writer import write_png_to_string
from yt.visualization.image_writer import map_to_colors
image = np.log10(val)
mi = np.nanmin(image[~np.isinf(image)])
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/visualization/image_writer.py
--- a/yt/visualization/image_writer.py
+++ b/yt/visualization/image_writer.py
@@ -19,6 +19,7 @@
from yt.funcs import *
import _colormap_data as cmd
import yt.utilities.lib as au
+import yt.utilities.png_writer as pw
import __builtin__
def scale_image(image, mi=None, ma=None):
@@ -106,7 +107,7 @@
alpha_channel = scale_image(alpha_channel)
image = np.array([red_channel, green_channel, blue_channel, alpha_channel])
image = image.transpose().copy() # Have to make sure it's contiguous
- au.write_png(image, fn)
+ pw.write_png(image, fn)
def write_bitmap(bitmap_array, filename, max_val = None, transpose=False):
r"""Write out a bitmapped image directly to a PNG file.
@@ -151,9 +152,9 @@
if transpose:
bitmap_array = bitmap_array.swapaxes(0,1)
if filename is not None:
- au.write_png(bitmap_array.copy(), filename)
+ pw.write_png(bitmap_array.copy(), filename)
else:
- return au.write_png_to_string(bitmap_array.copy())
+ return pw.write_png_to_string(bitmap_array.copy())
return bitmap_array
def write_image(image, filename, color_bounds = None, cmap_name = "algae", func = lambda x: x):
@@ -195,7 +196,7 @@
mylog.info("Using only channel 1 of supplied image")
image = image[:,:,0]
to_plot = apply_colormap(image, color_bounds = color_bounds, cmap_name = cmap_name)
- au.write_png(to_plot, filename)
+ pw.write_png(to_plot, filename)
return to_plot
def apply_colormap(image, color_bounds = None, cmap_name = 'algae', func=lambda x: x):
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -41,7 +41,7 @@
from yt.funcs import \
mylog, iterable, ensure_list, fix_axis, \
get_ipython_api_version
-from yt.utilities.lib import write_png_to_string
+from yt.utilities.png_writer import write_png_to_string
from yt.utilities.definitions import \
x_dict, y_dict, \
axis_names, axis_labels, \
diff -r 36dc8027678fefc9d97995821f57585dc2f0fda3 -r b14aeeb32c24da11e8183514af80f14e8c5b5a55 yt/visualization/profile_plotter.py
--- a/yt/visualization/profile_plotter.py
+++ b/yt/visualization/profile_plotter.py
@@ -35,8 +35,7 @@
write_image, apply_colormap
from yt.data_objects.profiles import \
create_profile
-from yt.utilities.lib import \
- write_png_to_string
+from yt.utilities.png_writer import write_png_to_string
from yt.data_objects.profiles import \
BinnedProfile1D, \
BinnedProfile2D
Repository URL: https://bitbucket.org/yt_analysis/yt/