[yt-svn] commit/yt-3.0: 43 new changesets
commits-noreply at bitbucket.org
Mon Sep 30 04:13:52 PDT 2013
43 new commits in yt-3.0:
https://bitbucket.org/yt_analysis/yt-3.0/commits/b2f1f487de1f/
Changeset: b2f1f487de1f
Branch: yt
User: MatthewTurk
Date: 2012-07-12 23:30:41
Summary: Adding a callback for halo analysis inside the Rockstar halo property
calculation. Requires patch submitted upstream.
Affected #: 1 file
diff -r 2df4bc2c8e640b0b31c94a8573e66dd8908af784 -r b2f1f487de1fab940ada3601882165745b49da7b yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -34,9 +34,27 @@
np.int64_t id
float pos[6]
+ctypedef struct particleflat:
+ np.int64_t id
+ float pos_x
+ float pos_y
+ float pos_z
+ float vel_x
+ float vel_y
+ float vel_z
+
+cdef import from "halo.h":
+ struct halo:
+ np.int64_t id
+ float pos[6], corevel[3], bulkvel[3]
+ float m, r, child_r, mgrav, vmax, rvmax, rs, vrms, J[3], energy, spin
+ np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+ float min_pos_err, min_vel_err, min_bulkvel_err
+
cdef import from "io_generic.h":
ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p)
- void set_load_particles_generic(LPG func)
+ ctypedef void (*AHG) (halo *h, particle *hp)
+ void set_load_particles_generic(LPG func, AHG afunc)
cdef import from "rockstar.h":
void rockstar(float *bounds, np.int64_t manual_subs)
@@ -238,6 +256,12 @@
cdef class RockstarInterface
+cdef void rh_analyze_halo(halo *h, particle *hp):
+ cdef particleflat[:] pslice
+ pslice = <particleflat[:h.num_p]> (<particleflat *>hp)
+ parray = np.asarray(pslice)
+ # This is where we call our functions
+
cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
print 'reading from particle filename %s'%filename # should print ./inline.0
cdef np.float64_t conv[6], left_edge[6]
@@ -345,7 +369,8 @@
setup_config()
rh = self
cdef LPG func = rh_read_particles
- set_load_particles_generic(func)
+ cdef AHG afunc = rh_analyze_halo
+ set_load_particles_generic(func, afunc)
def call_rockstar(self):
read_particles("generic")
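The key move in rh_analyze_halo above is reinterpreting Rockstar's raw
particle buffer as a typed memoryview, so each halo's particles reach
Python as a zero-copy structured array. A minimal standalone sketch of
that pattern (illustrative names, not the yt source; assumes Cython
compiled against the NumPy headers):

    # sketch.pyx -- the pointer-to-memoryview cast used in rh_analyze_halo
    import numpy as np
    cimport numpy as np

    ctypedef struct particleflat:
        np.int64_t id
        float pos_x
        float pos_y
        float pos_z
        float vel_x
        float vel_y
        float vel_z

    cdef object particles_as_array(void *hp, np.int64_t num_p):
        cdef particleflat[:] pslice
        # Reinterpret the C buffer as a 1-D memoryview of num_p records.
        pslice = <particleflat[:num_p]> (<particleflat *> hp)
        # Wrap it without copying; struct fields become named columns.
        return np.asarray(pslice)
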
https://bitbucket.org/yt_analysis/yt-3.0/commits/b7e5a65fad3f/
Changeset: b7e5a65fad3f
Branch: yt
User: MatthewTurk
Date: 2012-07-12 23:45:38
Summary: Initial pass at callbacks, but I think long-term we need to make a halo proxy
object.
Affected #: 1 file
diff -r b2f1f487de1fab940ada3601882165745b49da7b -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -260,6 +260,8 @@
cdef particleflat[:] pslice
pslice = <particleflat[:h.num_p]> (<particleflat *>hp)
parray = np.asarray(pslice)
+ for cb in rh.callbacks:
+ cb(rh.pf, parray)
# This is where we call our functions
cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
@@ -311,6 +313,7 @@
cdef int rank
cdef int size
cdef public int block_ratio
+ cdef public object callbacks
def __cinit__(self, pf, data_source):
self.pf = pf
@@ -322,7 +325,8 @@
int num_writers = 1,
int writing_port = -1, int block_ratio = 1,
int periodic = 1, int num_snaps = 1,
- int min_halo_size = 25, outbase = "None"):
+ int min_halo_size = 25, outbase = "None",
+ callbacks = None):
global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -348,6 +352,8 @@
NUM_BLOCKS = num_readers
MIN_HALO_OUTPUT_SIZE=min_halo_size
self.block_ratio = block_ratio
+ if callbacks is None: callbacks = []
+ self.callbacks = callbacks
h0 = self.pf.hubble_constant
Ol = self.pf.omega_lambda
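Together with the previous changeset, this gives user code a hook into
the halo loop: every callable in `callbacks` is invoked as
cb(rh.pf, parray) from rh_analyze_halo once a halo's particles have
been wrapped. A hypothetical registration sketch (the callback body
and the leading `server_address`/`server_port` arguments are
illustrative assumptions; see setup_rockstar's full signature):

    def center_of_mass(pf, parray):
        # parray carries the particleflat fields as named columns.
        print parray["pos_x"].mean(), parray["pos_y"].mean(), parray["pos_z"].mean()

    ri = RockstarInterface(pf, data_source)
    ri.setup_rockstar(server_address, server_port,  # hypothetical leading args
                      min_halo_size=25,
                      callbacks=[center_of_mass])
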
https://bitbucket.org/yt_analysis/yt-3.0/commits/bc9399e08adb/
Changeset: bc9399e08adb
Branch: yt
User: MatthewTurk
Date: 2013-08-19 21:17:46
Summary: Merging from over a year of work.
Affected #: 319 files
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 .hgchurn
--- /dev/null
+++ b/.hgchurn
@@ -0,0 +1,19 @@
+stephenskory at yahoo.com = s at skory.us
+"Stephen Skory stephenskory at yahoo.com" = s at skory.us
+yuan at astro.columbia.edu = bear0980 at gmail.com
+juxtaposicion at gmail.com = cemoody at ucsc.edu
+chummels at gmail.com = chummels at astro.columbia.edu
+jwise at astro.princeton.edu = jwise at physics.gatech.edu
+sam.skillman at gmail.com = samskillman at gmail.com
+casey at thestarkeffect.com = caseywstark at gmail.com
+chiffre = chiffre at posteo.de
+Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -4,27 +4,36 @@
freetype.cfg
hdf5.cfg
png.cfg
+rockstar.cfg
+yt_updater.log
+yt/analysis_modules/halo_finding/rockstar/rockstar_interface.c
yt/frontends/ramses/_ramses_reader.cpp
yt/utilities/amr_utils.c
yt/utilities/kdtree/forthonf2c.h
yt/utilities/libconfig_wrapper.c
yt/utilities/spatial/ckdtree.c
-yt/utilities/_amr_utils/CICDeposit.c
-yt/utilities/_amr_utils/ContourFinding.c
-yt/utilities/_amr_utils/DepthFirstOctree.c
-yt/utilities/_amr_utils/FixedInterpolator.c
-yt/utilities/_amr_utils/fortran_reader.c
-yt/utilities/_amr_utils/freetype_writer.c
-yt/utilities/_amr_utils/geometry_utils.c
-yt/utilities/_amr_utils/Interpolators.c
-yt/utilities/_amr_utils/kdtree.c
-yt/utilities/_amr_utils/misc_utilities.c
-yt/utilities/_amr_utils/Octree.c
-yt/utilities/_amr_utils/png_writer.c
-yt/utilities/_amr_utils/PointsInVolume.c
-yt/utilities/_amr_utils/QuadTree.c
-yt/utilities/_amr_utils/RayIntegrators.c
-yt/utilities/_amr_utils/VolumeIntegrator.c
+yt/utilities/lib/amr_kdtools.c
+yt/utilities/lib/CICDeposit.c
+yt/utilities/lib/ContourFinding.c
+yt/utilities/lib/DepthFirstOctree.c
+yt/utilities/lib/FixedInterpolator.c
+yt/utilities/lib/fortran_reader.c
+yt/utilities/lib/freetype_writer.c
+yt/utilities/lib/geometry_utils.c
+yt/utilities/lib/Interpolators.c
+yt/utilities/lib/kdtree.c
+yt/utilities/lib/misc_utilities.c
+yt/utilities/lib/Octree.c
+yt/utilities/lib/png_writer.c
+yt/utilities/lib/PointsInVolume.c
+yt/utilities/lib/QuadTree.c
+yt/utilities/lib/RayIntegrators.c
+yt/utilities/lib/VolumeIntegrator.c
+yt/utilities/lib/grid_traversal.c
+yt/utilities/lib/GridTree.c
+yt/utilities/lib/marching_cubes.c
+yt/utilities/lib/png_writer.h
+yt/utilities/lib/write_array.c
syntax: glob
*.pyc
.*.swp
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5152,3 +5152,7 @@
0000000000000000000000000000000000000000 svn.993
fff7118f00e25731ccf37cba3082b8fcb73cf90e svn.371
0000000000000000000000000000000000000000 svn.371
+6528c562fed6f994b8d1ecabaf375ddc4707dade mpi-opaque
+0000000000000000000000000000000000000000 mpi-opaque
+f15825659f5af3ce64aaad30062aff3603cbfb66 hop callback
+0000000000000000000000000000000000000000 hop callback
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 CREDITS
--- a/CREDITS
+++ b/CREDITS
@@ -1,29 +1,41 @@
YT is a group effort.
-Contributors: Matthew Turk (matthewturk at gmail.com)
- Britton Smith (brittonsmith at gmail.com)
- Jeff Oishi (jsoishi at gmail.com)
- Stephen Skory (s at skory.us)
- Sam Skillman (samskillman at gmail.com)
- Devin Silvia (devin.silvia at gmail.com)
- John Wise (jwise at astro.princeton.edu)
- David Collins (dcollins at physics.ucsd.edu)
- Christopher Moody (cemoody at ucsc.edu)
- Oliver Hahn (ohahn at stanford.edu)
- John ZuHone (jzuhone at cfa.harvard.edu)
- Chris Malone (cmalone at mail.astro.sunysb.edu)
- Cameron Hummels (chummels at astro.columbia.edu)
- Stefan Klemer (sklemer at phys.uni-goettingen.de)
- Tom Abel (tabel at stanford.edu)
- Andrew Myers (atmyers at astro.berkeley.edu)
- Michael Kuhlen (mqk at astro.berkeley.edu)
- Casey Stark (caseywstark at gmail.com)
- JC Passy (jcpassy at gmail.com)
- Eve Lee (elee at cita.utoronto.ca)
- Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
- Kacper Kowalik (xarthisius.kk at gmail.com)
- Nathan Goldbaum (goldbaum at ucolick.org)
- Anna Rosen (rosen at ucolick.org)
+Contributors: Tom Abel (tabel at stanford.edu)
+ David Collins (dcollins at physics.ucsd.edu)
+ Brian Crosby (crosby.bd at gmail.com)
+ Andrew Cunningham (ajcunn at gmail.com)
+ Nathan Goldbaum (goldbaum at ucolick.org)
+ Markus Haider (markus.haider at uibk.ac.at)
+ Cameron Hummels (chummels at gmail.com)
+ Christian Karch (chiffre at posteo.de)
+ Ji-hoon Kim (me at jihoonkim.org)
+ Steffen Klemer (sklemer at phys.uni-goettingen.de)
+ Kacper Kowalik (xarthisius.kk at gmail.com)
+ Michael Kuhlen (mqk at astro.berkeley.edu)
+ Eve Lee (elee at cita.utoronto.ca)
+ Yuan Li (yuan at astro.columbia.edu)
+ Chris Malone (chris.m.malone at gmail.com)
+ Josh Maloney (joshua.moloney at colorado.edu)
+ Chris Moody (cemoody at ucsc.edu)
+ Andrew Myers (atmyers at astro.berkeley.edu)
+ Jeff Oishi (jsoishi at gmail.com)
+ Jean-Claude Passy (jcpassy at uvic.ca)
+ Mark Richardson (Mark.L.Richardson at asu.edu)
+ Thomas Robitaille (thomas.robitaille at gmail.com)
+ Anna Rosen (rosen at ucolick.org)
+ Anthony Scopatz (scopatz at gmail.com)
+ Devin Silvia (devin.silvia at colorado.edu)
+ Sam Skillman (samskillman at gmail.com)
+ Stephen Skory (s at skory.us)
+ Britton Smith (brittonsmith at gmail.com)
+ Geoffrey So (gsiisg at gmail.com)
+ Casey Stark (caseywstark at gmail.com)
+ Elizabeth Tasker (tasker at astro1.sci.hokudai.ac.jp)
+ Stephanie Tonnesen (stonnes at gmail.com)
+ Matthew Turk (matthewturk at gmail.com)
+ Rich Wagner (rwagner at physics.ucsd.edu)
+ John Wise (jwise at physics.gatech.edu)
+ John ZuHone (jzuhone at gmail.com)
We also include the Delaunay Triangulation module written by Robert Kern of
Enthought, the cmdln.py module by Trent Mick, and the progressbar module by
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
-include distribute_setup.py
+include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
-recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 README
--- a/README
+++ b/README
@@ -1,6 +1,6 @@
Hi there! You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion,
-CASTRO, MAESTRO, ART and Ramses. It's written in python and heavily leverages
+simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
+Nyx, MAESTRO, ART and Ramses. It's written in python and heavily leverages
both NumPy and Matplotlib for fast arrays and visualization, respectively.
Full documentation and a user community can be found at:
@@ -17,8 +17,7 @@
there are options available, but it should be straightforward.
In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation in
-doc/how_to_develop_yt.txt or at:
+interested in helping out, see the developer documentation:
http://yt-project.org/doc/advanced/developing.html
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 distribute_setup.py
--- a/distribute_setup.py
+++ b/distribute_setup.py
@@ -14,11 +14,14 @@
This file can also be run as a script to install or upgrade setuptools.
"""
import os
+import shutil
import sys
import time
import fnmatch
import tempfile
import tarfile
+import optparse
+
from distutils import log
try:
@@ -46,7 +49,7 @@
args = [quote(arg) for arg in args]
return os.spawnl(os.P_WAIT, sys.executable, *args) == 0
-DEFAULT_VERSION = "0.6.21"
+DEFAULT_VERSION = "0.6.32"
DEFAULT_URL = "http://pypi.python.org/packages/source/d/distribute/"
SETUPTOOLS_FAKED_VERSION = "0.6c11"
@@ -63,7 +66,7 @@
""" % SETUPTOOLS_FAKED_VERSION
-def _install(tarball):
+def _install(tarball, install_args=()):
# extracting the tarball
tmpdir = tempfile.mkdtemp()
log.warn('Extracting in %s', tmpdir)
@@ -81,11 +84,14 @@
# installing
log.warn('Installing Distribute')
- if not _python_cmd('setup.py', 'install'):
+ if not _python_cmd('setup.py', 'install', *install_args):
log.warn('Something went wrong during the installation.')
log.warn('See the error message above.')
+ # exitcode will be 2
+ return 2
finally:
os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
def _build_egg(egg, tarball, to_dir):
@@ -110,6 +116,7 @@
finally:
os.chdir(old_wd)
+ shutil.rmtree(tmpdir)
# returning the result
log.warn(egg)
if not os.path.exists(egg):
@@ -144,7 +151,7 @@
except ImportError:
return _do_download(version, download_base, to_dir, download_delay)
try:
- pkg_resources.require("distribute>="+version)
+ pkg_resources.require("distribute>=" + version)
return
except pkg_resources.VersionConflict:
e = sys.exc_info()[1]
@@ -167,6 +174,7 @@
if not no_fake:
_create_fake_setuptools_pkg_info(to_dir)
+
def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
to_dir=os.curdir, delay=15):
"""Download distribute from a specified location and return its filename
@@ -203,6 +211,7 @@
dst.close()
return os.path.realpath(saveto)
+
def _no_sandbox(function):
def __no_sandbox(*args, **kw):
try:
@@ -227,6 +236,7 @@
return __no_sandbox
+
def _patch_file(path, content):
"""Will backup the file then patch it"""
existing_content = open(path).read()
@@ -245,15 +255,18 @@
_patch_file = _no_sandbox(_patch_file)
+
def _same_content(path, content):
return open(path).read() == content
+
def _rename_path(path):
new_name = path + '.OLD.%s' % time.time()
- log.warn('Renaming %s into %s', path, new_name)
+ log.warn('Renaming %s to %s', path, new_name)
os.rename(path, new_name)
return new_name
+
def _remove_flat_installation(placeholder):
if not os.path.isdir(placeholder):
log.warn('Unkown installation at %s', placeholder)
@@ -267,7 +280,7 @@
log.warn('Could not locate setuptools*.egg-info')
return
- log.warn('Removing elements out of the way...')
+ log.warn('Moving elements out of the way...')
pkg_info = os.path.join(placeholder, file)
if os.path.isdir(pkg_info):
patched = _patch_egg_dir(pkg_info)
@@ -289,11 +302,13 @@
_remove_flat_installation = _no_sandbox(_remove_flat_installation)
+
def _after_install(dist):
log.warn('After install bootstrap.')
placeholder = dist.get_command_obj('install').install_purelib
_create_fake_setuptools_pkg_info(placeholder)
+
def _create_fake_setuptools_pkg_info(placeholder):
if not placeholder or not os.path.exists(placeholder):
log.warn('Could not find the install location')
@@ -307,7 +322,11 @@
return
log.warn('Creating %s', pkg_info)
- f = open(pkg_info, 'w')
+ try:
+ f = open(pkg_info, 'w')
+ except EnvironmentError:
+ log.warn("Don't have permissions to write %s, skipping", pkg_info)
+ return
try:
f.write(SETUPTOOLS_PKG_INFO)
finally:
@@ -321,7 +340,10 @@
finally:
f.close()
-_create_fake_setuptools_pkg_info = _no_sandbox(_create_fake_setuptools_pkg_info)
+_create_fake_setuptools_pkg_info = _no_sandbox(
+ _create_fake_setuptools_pkg_info
+)
+
def _patch_egg_dir(path):
# let's check if it's already patched
@@ -343,6 +365,7 @@
_patch_egg_dir = _no_sandbox(_patch_egg_dir)
+
def _before_install():
log.warn('Before install bootstrap.')
_fake_setuptools()
@@ -351,7 +374,7 @@
def _under_prefix(location):
if 'install' not in sys.argv:
return True
- args = sys.argv[sys.argv.index('install')+1:]
+ args = sys.argv[sys.argv.index('install') + 1:]
for index, arg in enumerate(args):
for option in ('--root', '--prefix'):
if arg.startswith('%s=' % option):
@@ -359,7 +382,7 @@
return location.startswith(top_dir)
elif arg == option:
if len(args) > index:
- top_dir = args[index+1]
+ top_dir = args[index + 1]
return location.startswith(top_dir)
if arg == '--user' and USER_SITE is not None:
return location.startswith(USER_SITE)
@@ -376,11 +399,14 @@
return
ws = pkg_resources.working_set
try:
- setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools',
- replacement=False))
+ setuptools_dist = ws.find(
+ pkg_resources.Requirement.parse('setuptools', replacement=False)
+ )
except TypeError:
# old distribute API
- setuptools_dist = ws.find(pkg_resources.Requirement.parse('setuptools'))
+ setuptools_dist = ws.find(
+ pkg_resources.Requirement.parse('setuptools')
+ )
if setuptools_dist is None:
log.warn('No setuptools distribution found')
@@ -414,7 +440,7 @@
res = _patch_egg_dir(setuptools_location)
if not res:
return
- log.warn('Patched done.')
+ log.warn('Patching complete.')
_relaunch()
@@ -422,7 +448,9 @@
log.warn('Relaunching...')
# we have to relaunch the process
# pip marker to avoid a relaunch bug
- if sys.argv[:3] == ['-c', 'install', '--single-version-externally-managed']:
+ _cmd1 = ['-c', 'install', '--single-version-externally-managed']
+ _cmd2 = ['-c', 'install', '--record']
+ if sys.argv[:3] == _cmd1 or sys.argv[:3] == _cmd2:
sys.argv[0] = 'setup.py'
args = [sys.executable] + sys.argv
sys.exit(subprocess.call(args))
@@ -448,7 +476,7 @@
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
- tarinfo.mode = 448 # decimal for oct 0700
+ tarinfo.mode = 448 # decimal for oct 0700
self.extract(tarinfo, path)
# Reverse sort directories.
@@ -475,11 +503,39 @@
self._dbg(1, "tarfile: %s" % e)
-def main(argv, version=DEFAULT_VERSION):
+def _build_install_args(options):
+ """
+ Build the arguments to 'python setup.py install' on the distribute package
+ """
+ install_args = []
+ if options.user_install:
+ if sys.version_info < (2, 6):
+ log.warn("--user requires Python 2.6 or later")
+ raise SystemExit(1)
+ install_args.append('--user')
+ return install_args
+
+def _parse_args():
+ """
+ Parse the command line for options
+ """
+ parser = optparse.OptionParser()
+ parser.add_option(
+ '--user', dest='user_install', action='store_true', default=False,
+ help='install in user site package (requires Python 2.6 or later)')
+ parser.add_option(
+ '--download-base', dest='download_base', metavar="URL",
+ default=DEFAULT_URL,
+ help='alternative URL from where to download the distribute package')
+ options, args = parser.parse_args()
+ # positional arguments are ignored
+ return options
+
+def main(version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
- tarball = download_setuptools()
- _install(tarball)
-
+ options = _parse_args()
+ tarball = download_setuptools(download_base=options.download_base)
+ return _install(tarball, _build_install_args(options))
if __name__ == '__main__':
- main(sys.argv[1:])
+ sys.exit(main())
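With main() now parsing options and returning _install's exit status,
the bootstrapper can be driven non-interactively and failures detected
from the shell. Both flags below are defined in _parse_args above; the
mirror URL is a placeholder:

    python distribute_setup.py --user
    python distribute_setup.py --download-base=http://mirror.example.org/distribute/
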
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 doc/README
--- a/doc/README
+++ b/doc/README
@@ -12,10 +12,3 @@
in the build/ directory, as well as at:
http://yt-project.org/doc/index.html
-
-You can also download a copy of the documentation and unzip it right here:
-
-wget http://yt-project.org/doc/download.zip
-unzip download.zip
-
-Then open index.html with your favorite web browser, and be off!
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 doc/activate.csh
--- a/doc/activate.csh
+++ b/doc/activate.csh
@@ -20,7 +20,7 @@
setenv YT_DEST
endif
set _OLD_VIRTUAL_YT_DEST="$YT_DEST"
-setenv YT_DEST "${VIRTUAL_ENV}:${YT_DEST}"
+setenv YT_DEST "${VIRTUAL_ENV}"
if ($?PYTHONPATH == 0) then
setenv PYTHONPATH
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 doc/coding_styleguide.txt
--- a/doc/coding_styleguide.txt
+++ b/doc/coding_styleguide.txt
@@ -13,12 +13,12 @@
* Do not use nested classes unless you have a very good reason to, such as
requiring a namespace or class-definition modification. Classes should live
at the top level. __metaclass__ is exempt from this.
- * Do not use unecessary parenthesis in conditionals. if((something) and
+ * Do not use unnecessary parenthesis in conditionals. if((something) and
(something_else)) should be rewritten as if something and something_else.
Python is more forgiving than C.
* Avoid copying memory when possible. For example, don't do
"a = a.reshape(3,4)" when "a.shape = (3,4)" will do, and "a = a * 3" should
- be "na.multiply(a, 3, a)".
+ be "np.multiply(a, 3, a)".
* In general, avoid all double-underscore method names: __something is usually
unnecessary.
* When writing a subclass, use the super built-in to access the super class,
@@ -40,8 +40,7 @@
from yt.visualization.plot_collection import PlotCollection
- * Numpy is to be imported as "na" not "np". While this may change in the
- future, for now this is the correct idiom.
+ * Numpy is to be imported as "np", after a long time of using "na".
* Do not use too many keyword arguments. If you have a lot of keyword
arguments, then you are doing too much in __init__ and not enough via
parameter setting.
@@ -51,7 +50,7 @@
replace the old class. Too many options makes for a confusing user
experience.
* Parameter files are a last resort.
- * The usage of the **kwargs construction should be avoided. If they cannoted
+ * The usage of the **kwargs construction should be avoided. If they cannot
be avoided, they must be explained, even if they are only to be passed on to
a nested function.
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -7,23 +7,27 @@
# There are a few options, but you only need to set *one* of them. And
# that's the next one, DEST_DIR. But, if you want to use an existing HDF5
# installation you can set HDF5_DIR, or if you want to use some other
-# subversion checkout of YT, you can set YT_DIR, too. (It'll already
+# subversion checkout of yt, you can set YT_DIR, too. (It'll already
# check the current directory and one up.
#
-# NOTE: If you have trouble with wxPython, set INST_WXPYTHON=0 .
-#
-# And, feel free to drop me a line: matthewturk at gmail.com
+# If you experience problems, please visit the Help section at
+# http://yt-project.org.
#
DEST_SUFFIX="yt-`uname -m`"
DEST_DIR="`pwd`/${DEST_SUFFIX/ /}" # Installation location
BRANCH="yt" # This is the branch to which we will forcibly update.
+if [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -n ${YT_DEST} ]
+then
+ DEST_DIR=${YT_DEST}
+fi
+
# Here's where you put the HDF5 path if you like; otherwise it'll download it
# and install it on its own
#HDF5_DIR=
-# If you need to supply arguments to the NumPy build, supply them here
+# If you need to supply arguments to the NumPy or SciPy build, supply them here
# This one turns on gfortran manually:
#NUMPY_ARGS="--fcompiler=gnu95"
# If you absolutely can't get the fortran to work, try this:
@@ -31,7 +35,7 @@
INST_HG=1 # Install Mercurial or not? If hg is not already
# installed, yt cannot be installed.
-INST_ZLIB=1 # On some systems (Kraken) matplotlib has issues with
+INST_ZLIB=1 # On some systems (Kraken) matplotlib has issues with
# the system zlib, which is compiled statically.
# If need be, you can turn this off.
INST_BZLIB=1 # On some systems, libbzip2 is missing. This can
@@ -42,9 +46,11 @@
INST_SQLITE3=1 # Install a local version of SQLite3?
INST_PYX=0 # Install PyX? Sometimes PyX can be problematic without a
# working TeX installation.
-INST_0MQ=0 # Install 0mq (for IPython) and affiliated bindings?
+INST_0MQ=1 # Install 0mq (for IPython) and affiliated bindings?
+INST_ROCKSTAR=0 # Install the Rockstar halo finder?
+INST_SCIPY=0 # Install scipy?
-# If you've got YT some other place, set this to point to it.
+# If you've got yt some other place, set this to point to it.
YT_DIR=""
# If you need to pass anything to matplotlib, do so here.
@@ -56,6 +62,33 @@
# arguments. For instance, "-j4"
MAKE_PROCS=""
+# Make sure we are NOT being run as root
+if [[ $EUID -eq 0 ]]
+then
+ echo "******************************************************"
+ echo "* *"
+ echo "* *"
+ echo "* IT IS A BAD IDEA TO RUN THIS SCRIPT AS ROOT!!!! *"
+ echo "* *"
+ echo "* *"
+ echo "******************************************************"
+ echo
+ echo "If you really want to do this, you must manually edit"
+ echo "the script to re-enable root-level installation. Sorry!"
+ exit 1
+fi
+if [[ ${DEST_DIR%/} == /usr/local ]]
+then
+ echo "******************************************************"
+ echo "* *"
+ echo "* *"
+ echo "* THIS SCRIPT WILL NOT INSTALL TO /usr/local !!!! *"
+ echo "* *"
+ echo "* *"
+ echo "******************************************************"
+ exit 1
+fi
+
#------------------------------------------------------------------------------#
# #
# Okay, the script starts here. Feel free to play with it, but hopefully #
@@ -65,6 +98,48 @@
LOG_FILE="${DEST_DIR}/yt_install.log"
+function write_config
+{
+ CONFIG_FILE=${DEST_DIR}/.yt_config
+
+ echo INST_HG=${INST_HG} > ${CONFIG_FILE}
+ echo INST_ZLIB=${INST_ZLIB} >> ${CONFIG_FILE}
+ echo INST_BZLIB=${INST_BZLIB} >> ${CONFIG_FILE}
+ echo INST_PNG=${INST_PNG} >> ${CONFIG_FILE}
+ echo INST_FTYPE=${INST_FTYPE} >> ${CONFIG_FILE}
+ echo INST_ENZO=${INST_ENZO} >> ${CONFIG_FILE}
+ echo INST_SQLITE3=${INST_SQLITE3} >> ${CONFIG_FILE}
+ echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE}
+ echo INST_0MQ=${INST_0MQ} >> ${CONFIG_FILE}
+ echo INST_ROCKSTAR=${INST_ROCKSTAR} >> ${CONFIG_FILE}
+ echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE}
+ echo YT_DIR=${YT_DIR} >> ${CONFIG_FILE}
+ echo MPL_SUPP_LDFLAGS=${MPL_SUPP_LDFLAGS} >> ${CONFIG_FILE}
+ echo MPL_SUPP_CFLAGS=${MPL_SUPP_CFLAGS} >> ${CONFIG_FILE}
+ echo MPL_SUPP_CXXFLAGS=${MPL_SUPP_CXXFLAGS} >> ${CONFIG_FILE}
+ echo MAKE_PROCS=${MAKE_PROCS} >> ${CONFIG_FILE}
+ if [ ${HDF5_DIR} ]
+ then
+ echo ${HDF5_DIR} >> ${CONFIG_FILE}
+ fi
+ if [ ${NUMPY_ARGS} ]
+ then
+ echo ${NUMPY_ARGS} >> ${CONFIG_FILE}
+ fi
+}
+
+# Write config settings to file.
+CONFIG_FILE=${DEST_DIR}/.yt_config
+mkdir -p ${DEST_DIR}
+if [ -z ${REINST_YT} ] || [ ${REINST_YT} -neq 1 ]
+then
+ write_config
+elif [ ${REINST_YT} ] && [ ${REINST_YT} -eq 1 ] && [ -f ${CONFIG_FILE} ]
+then
+ USED_CONFIG=1
+ source ${CONFIG_FILE}
+fi
+
function get_willwont
{
if [ $1 -eq 1 ]
@@ -91,7 +166,8 @@
echo " * *"
echo " ******************************************"
echo
- echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
+ echo "IF YOU CHOOSE TO PROCEED:"
+ echo "YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
echo " $ module swap PrgEnv-pgi PrgEnv-gnu"
echo
return
@@ -126,25 +202,6 @@
echo " $ module swap PE-pgi PE-gnu"
echo
fi
- if [ "${MYHOSTLONG%%ranger.tacc.utexas.edu}" != "${MYHOSTLONG}" ]
- then
- echo "Looks like you're on Ranger."
- echo
- echo "NOTE: YOU MUST BE IN THE GNU PROGRAMMING ENVIRONMENT"
- echo "These commands should take care of that for you:"
- echo
- echo " $ module unload mvapich2"
- echo " $ module swap pgi gcc"
- echo " $ module load mvapich2"
- echo
- fi
- if [ "${MYHOST##honest}" != "${MYHOST}" ]
- then
- echo "Looks like you're on Abe."
- echo "We're going to have to set some supplemental environment"
- echo "variables to get this to work..."
- MPL_SUPP_LDFLAGS="${MPL_SUPP_LDFLAGS} -L${DEST_DIR}/lib -L${DEST_DIR}/lib64 -L/usr/local/lib64 -L/usr/local/lib"
- fi
if [ "${MYHOST##steele}" != "${MYHOST}" ]
then
echo "Looks like you're on Steele."
@@ -156,30 +213,144 @@
echo " $ module load gcc"
echo
fi
+ if [ "${MYHOST##midway}" != "${MYHOST}" ]
+ then
+ echo "Looks like you're on Midway."
+ echo
+ echo " ******************************************"
+ echo " * It may be better to use the yt module! *"
+ echo " * *"
+ echo " * $ module load yt *"
+ echo " * *"
+ echo " ******************************************"
+ echo
+ return
+ fi
if [ "${MYOS##Darwin}" != "${MYOS}" ]
then
echo "Looks like you're running on Mac OSX."
echo
echo "NOTE: you must have the Xcode command line tools installed."
echo
- echo "OS X 10.5: download Xcode 3.0 from the mac developer tools"
- echo "website"
+ echo "The instructions for obtaining these tools varies according"
+ echo "to your exact OS version. On older versions of OS X, you"
+ echo "must register for an account on the apple developer tools"
+ echo "website: https://developer.apple.com/downloads to obtain the"
+ echo "download link."
+ echo
+ echo "We have gathered some additional instructions for each"
+ echo "version of OS X below. If you have trouble installing yt"
+ echo "after following these instructions, don't hesitate to contact"
+ echo "the yt user's e-mail list."
+ echo
+ echo "You can see which version of OSX you are running by clicking"
+ echo "'About This Mac' in the apple menu on the left hand side of"
+ echo "menu bar. We're assuming that you've installed all operating"
+ echo "system updates; if you have an older version, we suggest"
+ echo "running software update and installing all available updates."
+ echo
+ echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+ echo "Apple developer tools website."
echo
- echo "OS X 10.6: download Xcode 3.2 from the mac developer tools"
- echo "website"
+ echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+ echo "developer tools website. You can either download the"
+ echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+ echo "Software Update to update to XCode 3.2.6 or"
+ echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+ echo "bundle (4.1 GB)."
echo
- echo "OS X 10.7: download Xcode 4.0 from the mac app store or"
- echo "alternatively download the Xcode command line tools from"
- echo "the mac developer tools website"
+ echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+ echo "(search for Xcode)."
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
echo
- echo "NOTE: You may have problems if you are running OSX 10.6 (Snow"
- echo "Leopard) or newer. If you do, please set the following"
- echo "environment variables, remove any broken installation tree, and"
- echo "re-run this script verbatim."
+ echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+ echo "(search for Xcode)."
+ echo "Additionally, you will have to manually install the Xcode"
+ echo "command line tools, see:"
+ echo "http://stackoverflow.com/questions/9353444"
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
+ echo
+ echo "NOTE: It's possible that the installation will fail, if so,"
+ echo "please set the following environment variables, remove any"
+ echo "broken installation tree, and re-run this script verbatim."
echo
- echo "$ export CC=gcc-4.2"
- echo "$ export CXX=g++-4.2"
+ echo "$ export CC=gcc"
+ echo "$ export CXX=g++"
+ echo
+ OSX_VERSION=`sw_vers -productVersion`
+ if [ "${OSX_VERSION##10.8}" != "${OSX_VERSION}" ]
+ then
+ MPL_SUPP_CFLAGS="${MPL_SUPP_CFLAGS} -mmacosx-version-min=10.7"
+ MPL_SUPP_CXXFLAGS="${MPL_SUPP_CXXFLAGS} -mmacosx-version-min=10.7"
+ fi
+ fi
+ if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+ then
+ echo "Looks like you're on an OpenSUSE-compatible machine."
echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * devel_C_C++"
+ echo " * libopenssl-devel"
+ echo " * libuuid-devel"
+ echo " * zip"
+ echo " * gcc-c++"
+ echo
+ echo "You can accomplish this by executing:"
+ echo
+ echo "$ sudo zypper install -t pattern devel_C_C++"
+ echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+ echo
+ echo "I am also setting special configure arguments to Python to"
+ echo "specify control lib/lib64 issues."
+ PYCONF_ARGS="--libdir=${DEST_DIR}/lib"
+ fi
+ if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+ then
+ echo "Looks like you're on an Ubuntu-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * libssl-dev"
+ echo " * build-essential"
+ echo " * libncurses5"
+ echo " * libncurses5-dev"
+ echo " * zip"
+ echo " * uuid-dev"
+ echo " * libfreetype6-dev"
+ echo " * tk-dev"
+ echo
+ echo "You can accomplish this by executing:"
+ echo
+ echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev libfreetype6-dev tk-dev"
+ echo
+ echo
+ echo " Additionally, if you want to put yt's lib dir in your LD_LIBRARY_PATH"
+ echo " so you can use yt without the activate script, you might "
+ echo " want to consider turning off LIBZ and FREETYPE in this"
+ echo " install script by editing this file and setting"
+ echo
+ echo " INST_ZLIB=0"
+ echo " INST_FTYPE=0"
+ echo
+ echo " to avoid conflicts with other command-line programs "
+ echo " (like eog and evince, for example)."
+ fi
+ if [ $INST_SCIPY -eq 1 ]
+ then
+ echo
+ echo "Looks like you've requested that the install script build SciPy."
+ echo
+ echo "If the SciPy build fails, please uncomment one of the the lines"
+ echo "at the top of the install script that sets NUMPY_ARGS, delete"
+ echo "any broken installation tree, and re-run the install script"
+ echo "verbatim."
+ echo
+ echo "If that doesn't work, don't hesitate to ask for help on the yt"
+ echo "user's mailing list."
+ echo
fi
if [ ! -z "${CFLAGS}" ]
then
@@ -187,7 +358,7 @@
echo "******************************************"
echo "** **"
echo "** Your CFLAGS is not empty. **"
- echo "** This can beak h5py compilation. **"
+ echo "** This can break h5py compilation. **"
echo "** **"
echo "******************************************"
echo "******************************************"
@@ -199,9 +370,9 @@
echo
echo "========================================================================"
echo
-echo "Hi there! This is the YT installation script. We're going to download"
+echo "Hi there! This is the yt installation script. We're going to download"
echo "some stuff and install it to create a self-contained, isolated"
-echo "environment for YT to run within."
+echo "environment for yt to run within."
echo
echo "Inside the installation script you can set a few variables. Here's what"
echo "they're currently set to -- you can hit Ctrl-C and edit the values in "
@@ -239,10 +410,18 @@
get_willwont ${INST_PYX}
echo "be installing PyX"
+printf "%-15s = %s so I " "INST_SCIPY" "${INST_SCIPY}"
+get_willwont ${INST_SCIPY}
+echo "be installing scipy"
+
printf "%-15s = %s so I " "INST_0MQ" "${INST_0MQ}"
get_willwont ${INST_0MQ}
echo "be installing ZeroMQ"
+printf "%-15s = %s so I " "INST_ROCKSTAR" "${INST_ROCKSTAR}"
+get_willwont ${INST_0MQ}
+echo "be installing Rockstar"
+
echo
if [ -z "$HDF5_DIR" ]
@@ -264,6 +443,12 @@
echo "hit Ctrl-C."
echo
host_specific
+if [ ${USED_CONFIG} ]
+then
+ echo "Settings were loaded from ${CONFIG_FILE}."
+ echo "Remove this file if you wish to return to the default settings."
+ echo
+fi
echo "========================================================================"
echo
read -p "[hit enter] "
@@ -288,11 +473,18 @@
function do_setup_py
{
[ -e $1/done ] && return
- echo "Installing $1 (arguments: '$*')"
- [ ! -e $1/extracted ] && tar xfz $1.tar.gz
- touch $1/extracted
- cd $1
- if [ ! -z `echo $1 | grep h5py` ]
+ LIB=$1
+ shift
+ if [ -z "$@" ]
+ then
+ echo "Installing $LIB"
+ else
+ echo "Installing $LIB (arguments: '$@')"
+ fi
+ [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+ touch $LIB/extracted
+ cd $LIB
+ if [ ! -z `echo $LIB | grep h5py` ]
then
shift
( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -305,7 +497,7 @@
cd ..
}
-if type -P wget &>/dev/null
+if type -P wget &>/dev/null
then
echo "Using wget"
export GETFILE="wget -nv"
@@ -334,9 +526,17 @@
function get_ytproject
{
+ [ -e $1 ] && return
+ echo "Downloading $1 from yt-project.org"
+ ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+ ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytdata
+{
echo "Downloading $1 from yt-project.org"
[ -e $1 ] && return
- ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+ ${GETFILE} "http://yt-project.org/data/$1" || do_exit
( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
}
@@ -348,70 +548,115 @@
exit 1
fi
+# Get supplemental data.
+
+mkdir -p ${DEST_DIR}/data
+cd ${DEST_DIR}/data
+echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410 xray_emissivity.h5' > xray_emissivity.h5.sha512
+get_ytdata xray_emissivity.h5
+
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
# Now we dump all our SHA512 files out.
-
-echo '2c1933ab31246b4f4eba049d3288156e0a72f1730604e3ed7357849967cdd329e4647cf236c9442ecfb06d0aff03e6fc892a7ba2a5c1cf5c011b7ab9c619acec Cython-0.16.tar.gz' > Cython-0.16.tar.gz.sha512
-echo 'b8a12bf05b3aafa71135e47da81440fd0f16a4bd91954bc5615ad3d3b7f9df7d5a7d5620dc61088dc6b04952c5c66ebda947a4cfa33ed1be614c8ca8c0f11dff PhiloGL-1.4.2.zip' > PhiloGL-1.4.2.zip.sha512
-echo '44eea803870a66ff0bab08d13a8b3388b5578ebc1c807d1d9dca0a93e6371e91b15d02917a00b3b20dc67abb5a21dabaf9b6e9257a561f85eeff2147ac73b478 PyX-0.11.1.tar.gz' > PyX-0.11.1.tar.gz.sha512
-echo '1a754d560bfa433f0960ab3b5a62edb5f291be98ec48cf4e5941fa5b84139e200b87a52efbbd6fa4a76d6feeff12439eed3e7a84db4421940d1bbb576f7a684e Python-2.7.2.tgz' > Python-2.7.2.tgz.sha512
-echo 'c017d3d59dd324ac91af0edc178c76b60a5f90fbb775cf843e39062f95bd846238f2c53705f8890ed3f34bc0e6e75671a73d13875eb0287d6201cb45f0a2d338 bzip2-1.0.5.tar.gz' > bzip2-1.0.5.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
+echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
+echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b519218f93946400326e9b656669269ecb3e5232b944e18fbc3eadc4fe2b56244d68aae56d6f69042b4c87c58c881ee2aaa279561ea0f0f48d5842155f4de9de freetype-2.4.4.tar.gz' > freetype-2.4.4.tar.gz.sha512
-echo '1531789e0a77d4829796d18552a4de7aecae7e8b63763a7951a8091921995800740fe03e72a7dbd496a5590828131c5f046ddead695e5cba79343b8c205148d1 h5py-2.0.1.tar.gz' > h5py-2.0.1.tar.gz.sha512
-echo '9644896e4a84665ad22f87eb885cbd4a0c60a5c30085d5dd5dba5f3c148dbee626f0cb01e59a7995a84245448a3f1e9ba98687d3f10250e2ee763074ed8ddc0e hdf5-1.8.7.tar.gz' > hdf5-1.8.7.tar.gz.sha512
-echo 'ffc5c9e0c8c8ea66479abd467e442419bd1c867e6dbd180be6a032869467955dc570cfdf1388452871303a440738f302d3227ab7728878c4a114cfc45d29d23c ipython-0.12.tar.gz' > ipython-0.12.tar.gz.sha512
-echo 'e748b66a379ee1e7963b045c3737670acf6aeeff1ebed679f427e74b642faa77404c2d5bbddb922339f009c229d0af1ae77cc43eab290e50af6157a6406d833f libpng-1.2.43.tar.gz' > libpng-1.2.43.tar.gz.sha512
-echo 'f5ab95c29ef6958096970265a6079f0eb8c43a500924346c4a6c6eb89d9110eeeb6c34a53715e71240e82ded2b76a7b8d5a9b05a07baa000b2926718264ad8ff matplotlib-1.1.0.tar.gz' > matplotlib-1.1.0.tar.gz.sha512
-echo 'ec7416729f99f5eef6700507e740552e771d6dd8863f757311538d7d67a0eecd3426381bd42a7ddbf0771bdde8bba5cb943f60031ae3567d6a3dcac738facda8 mercurial-2.2.2.tar.gz' > mercurial-2.2.2.tar.gz.sha512
-echo 'de3dd37f753614055dcfed910e9886e03688b8078492df3da94b1ec37be796030be93291cba09e8212fffd3e0a63b086902c3c25a996cf1439e15c5b16e014d9 numpy-1.6.1.tar.gz' > numpy-1.6.1.tar.gz.sha512
-echo '5ad681f99e75849a5ca6f439c7a19bb51abc73d121b50f4f8e4c0da42891950f30407f761a53f0fe51b370b1dbd4c4f5a480557cb2444c8c7c7d5412b328a474 sqlite-autoconf-3070500.tar.gz' > sqlite-autoconf-3070500.tar.gz.sha512
-echo 'edae735960279d92acf58e1f4095c6392a7c2059b8f1d2c46648fc608a0fb06b392db2d073f4973f5762c034ea66596e769b95b3d26ad963a086b9b2d09825f2 zlib-1.2.3.tar.bz2' > zlib-1.2.3.tar.bz2.sha512
-echo 'fb3cf421b2dc48c31956b3e3ee4ab6ebc743deec3bf626c2238a1996c8c51be87260bd6aa662793a1f0c34dcda9b3146763777bb162dfad6fec4ca7acc403b2e zeromq-2.2.0.tar.gz' > zeromq-2.2.0.tar.gz.sha512
-echo 'd761b492352841cdc125d9f0c99ee6d6c435812472ea234728b7f0fb4ad1048e1eec9b399df2081fbc926566f333f7780fedd0ce23255a6633fe5c60ed15a6af pyzmq-2.1.11.tar.gz' > pyzmq-2.1.11.tar.gz.sha512
-echo '57fa5e57dfb98154a42d2d477f29401c2260ae7ad3a8128a4098b42ee3b35c54367b1a3254bc76b9b3b14b4aab7c3e1135858f68abc5636daedf2f01f9b8a3cf tornado-2.2.tar.gz' > tornado-2.2.tar.gz.sha512
-
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4 scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.7.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.3.tar.bz2
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.5.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.2.43.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.4.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3070500.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.11.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-2.2.0.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-2.1.11.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-2.2.tar.gz
-get_ytproject Python-2.7.2.tgz
-get_ytproject numpy-1.6.1.tar.gz
-get_ytproject matplotlib-1.1.0.tar.gz
-get_ytproject mercurial-2.2.2.tar.gz
-get_ytproject ipython-0.12.tar.gz
-get_ytproject h5py-2.0.1.tar.gz
-get_ytproject Cython-0.16.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
get_ytproject reason-js-20120623.zip
-
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
- if [ ! -e bzip2-1.0.5/done ]
+ if [ ! -e $BZLIB/done ]
then
- [ ! -e bzip2-1.0.5 ] && tar xfz bzip2-1.0.5.tar.gz
+ [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
echo "Installing BZLIB"
- cd bzip2-1.0.5
- if [ `uname` = "Darwin" ]
+ cd $BZLIB
+ if [ `uname` = "Darwin" ]
then
- if [ -z "${CC}" ]
+ if [ -z "${CC}" ]
then
sed -i.bak 's/soname/install_name/' Makefile-libbz2_so
else
- sed -i.bak -e 's/soname/install_name/' -e "s/CC=gcc/CC=${CC}/" Makefile-libbz2_so
+ sed -i.bak -e 's/soname/install_name/' -e "s|CC=gcc|CC=${CC}|" Makefile-libbz2_so
fi
fi
( make install CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make -f Makefile-libbz2_so CFLAGS=-fPIC LDFLAGS=-fPIC PREFIX=${DEST_DIR} 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( cp -v libbz2.so.1.0.4 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( cp -v libbz2.so.1.0.6 ${DEST_DIR}/lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
@@ -422,11 +667,11 @@
if [ $INST_ZLIB -eq 1 ]
then
- if [ ! -e zlib-1.2.3/done ]
+ if [ ! -e $ZLIB/done ]
then
- [ ! -e zlib-1.2.3 ] && tar xfj zlib-1.2.3.tar.bz2
+ [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
echo "Installing ZLIB"
- cd zlib-1.2.3
+ cd $ZLIB
( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -440,11 +685,11 @@
if [ $INST_PNG -eq 1 ]
then
- if [ ! -e libpng-1.2.43/done ]
+ if [ ! -e $PNG/done ]
then
- [ ! -e libpng-1.2.43 ] && tar xfz libpng-1.2.43.tar.gz
+ [ ! -e $PNG ] && tar xfz $PNG.tar.gz
echo "Installing PNG"
- cd libpng-1.2.43
+ cd $PNG
( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -458,13 +703,14 @@
if [ $INST_FTYPE -eq 1 ]
then
- if [ ! -e freetype-2.4.4/done ]
+ if [ ! -e $FREETYPE_VER/done ]
then
- [ ! -e freetype-2.4.4 ] && tar xfz freetype-2.4.4.tar.gz
+ [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
echo "Installing FreeType2"
- cd freetype-2.4.4
+ cd $FREETYPE_VER
( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
@@ -476,11 +722,11 @@
if [ -z "$HDF5_DIR" ]
then
- if [ ! -e hdf5-1.8.7/done ]
+ if [ ! -e $HDF5/done ]
then
- [ ! -e hdf5-1.8.7 ] && tar xfz hdf5-1.8.7.tar.gz
+ [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
echo "Installing HDF5"
- cd hdf5-1.8.7
+ cd $HDF5
( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -495,11 +741,11 @@
if [ $INST_SQLITE3 -eq 1 ]
then
- if [ ! -e sqlite-autoconf-3070500/done ]
+ if [ ! -e $SQLITE/done ]
then
- [ ! -e sqlite-autoconf-3070500 ] && tar xfz sqlite-autoconf-3070500.tar.gz
+ [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
echo "Installing SQLite3"
- cd sqlite-autoconf-3070500
+ cd $SQLITE
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -508,12 +754,12 @@
fi
fi
-if [ ! -e Python-2.7.2/done ]
+if [ ! -e $PYTHON/done ]
then
- echo "Installing Python. This may take a while, but don't worry. YT loves you."
- [ ! -e Python-2.7.2 ] && tar xfz Python-2.7.2.tgz
- cd Python-2.7.2
- ( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ echo "Installing Python. This may take a while, but don't worry. yt loves you."
+ [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+ cd $PYTHON
+ ( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -527,12 +773,11 @@
if [ $INST_HG -eq 1 ]
then
- echo "Installing Mercurial."
- do_setup_py mercurial-2.2.2
+ do_setup_py $MERCURIAL
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
- if type -P hg &>/dev/null
+ if type -P hg &>/dev/null
then
export HG_EXEC=hg
else
@@ -549,17 +794,17 @@
elif [ -e $ORIG_PWD/../yt/mods.py ]
then
YT_DIR=`dirname $ORIG_PWD`
- elif [ ! -e yt-hg ]
+ elif [ ! -e yt-hg ]
then
YT_DIR="$PWD/yt-hg/"
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt-supplemental/ 2>&1 ) 1>> ${LOG_FILE}
# Recently the hg server has had some issues with timeouts. In lieu of
# a new webserver, we are now moving to a three-stage process.
# First we clone the repo, but only up to r0.
- ( ${HG_EXEC} --debug clone http://hg.yt-project.org/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
+ ( ${HG_EXEC} --debug clone https://bitbucket.org/yt_analysis/yt/ ./yt-hg 2>&1 ) 1>> ${LOG_FILE}
# Now we update to the branch we're interested in.
( ${HG_EXEC} -R ${YT_DIR} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
- elif [ -e yt-hg ]
+ elif [ -e yt-hg ]
then
YT_DIR="$PWD/yt-hg/"
fi
@@ -567,7 +812,7 @@
fi
# This fixes problems with gfortran linking.
-unset LDFLAGS
+unset LDFLAGS
echo "Installing distribute"
( ${DEST_DIR}/bin/python2.7 ${YT_DIR}/distribute_setup.py 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -575,7 +820,40 @@
echo "Installing pip"
( ${DEST_DIR}/bin/easy_install-2.7 pip 2>&1 ) 1>> ${LOG_FILE} || do_exit
-do_setup_py numpy-1.6.1 ${NUMPY_ARGS}
+if [ $INST_SCIPY -eq 0 ]
+then
+ do_setup_py $NUMPY ${NUMPY_ARGS}
+else
+ if [ ! -e $SCIPY/done ]
+ then
+ if [ ! -e BLAS/done ]
+ then
+ tar xfz blas.tar.gz
+ echo "Building BLAS"
+ cd BLAS
+ gfortran -O2 -fPIC -fno-second-underscore -c *.f
+ ar r libfblas.a *.o 2>> ${LOG_FILE}
+ ranlib libfblas.a 1>> ${LOG_FILE}
+ rm -rf *.o
+ touch done
+ cd ..
+ fi
+ if [ ! -e $LAPACK/done ]
+ then
+ tar xfz $LAPACK.tar.gz
+ echo "Building LAPACK"
+ cd $LAPACK/
+ cp INSTALL/make.inc.gfortran make.inc
+ make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
+ touch done
+ cd ..
+ fi
+ fi
+ export BLAS=$PWD/BLAS/libfblas.a
+ export LAPACK=$PWD/$LAPACK/liblapack.a
+ do_setup_py $NUMPY ${NUMPY_ARGS}
+ do_setup_py $SCIPY ${NUMPY_ARGS}
+fi
if [ -n "${MPL_SUPP_LDFLAGS}" ]
then
@@ -596,10 +874,10 @@
echo "Setting CFLAGS ${CFLAGS}"
fi
# Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.1.0
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.1.0/setup.cfg
-do_setup_py matplotlib-1.1.0
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
if [ -n "${OLD_LDFLAGS}" ]
then
export LDFLAG=${OLD_LDFLAGS}
@@ -611,36 +889,57 @@
# Now we do our IPython installation, which has two optional dependencies.
if [ $INST_0MQ -eq 1 ]
then
- if [ ! -e zeromq-2.2.0/done ]
+ if [ ! -e $ZEROMQ/done ]
then
- [ ! -e zeromq-2.2.0 ] && tar xfz zeromq-2.2.0.tar.gz
+ [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
echo "Installing ZeroMQ"
- cd zeromq-2.2.0
+ cd $ZEROMQ
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
- do_setup_py pyzmq-2.1.11 --zmq=${DEST_DIR}
- do_setup_py tornado-2.2
+ do_setup_py $PYZMQ --zmq=${DEST_DIR}
+ do_setup_py $TORNADO
fi
-do_setup_py ipython-0.12
-do_setup_py h5py-2.0.1
-do_setup_py Cython-0.16
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.11.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
+
+# Now we build Rockstar and set its environment variable.
+if [ $INST_ROCKSTAR -eq 1 ]
+then
+ if [ ! -e Rockstar/done ]
+ then
+ [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
+ echo "Building Rockstar"
+ cd Rockstar
+ ( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ cp librockstar.so ${DEST_DIR}/lib
+ ROCKSTAR_DIR=${DEST_DIR}/src/Rockstar
+ echo $ROCKSTAR_DIR > ${YT_DIR}/rockstar.cfg
+ touch done
+ cd ..
+ fi
+fi
echo "Doing yt update, wiping local changes and updating to branch ${BRANCH}"
MY_PWD=`pwd`
cd $YT_DIR
-( ${HG_EXEC} pull && ${HG_EXEC} up -C ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
+( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
echo $HDF5_DIR > hdf5.cfg
[ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
[ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
-( ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
+( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd $MY_PWD
@@ -654,7 +953,7 @@
then
echo "Cloning a copy of Enzo."
cd ${DEST_DIR}/src/
- ${HG_EXEC} clone https://enzo.googlecode.com/hg/ ./enzo-hg-stable
+ ${HG_EXEC} clone https://bitbucket.org/enzo/enzo-stable ./enzo-hg-stable
cd $MY_PWD
fi
@@ -690,31 +989,20 @@
echo "environment."
echo
echo " $ source $DEST_DIR/bin/activate"
- echo " (yt)$ "
echo
echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
- echo "LD_LIBRARY_PATH to match your new yt install. But don't worry - as soon"
- echo "as you are done you can run 'deactivate' to return to your previous"
- echo "shell environment. If you use csh, just append .csh to the above."
+ echo "LD_LIBRARY_PATH to match your new yt install. If you use csh, just"
+ echo "append .csh to the above."
echo
- echo "For interactive data analysis and visualization, we recommend running"
- echo "the IPython interface, which will become more fully featured with time:"
+ echo "To get started with yt, check out the orientation:"
echo
- echo " $DEST_DIR/bin/iyt"
+ echo " http://yt-project.org/doc/orientation/"
echo
- echo "For command line analysis run:"
+ echo "or just activate your environment and run 'yt serve' to bring up the"
+ echo "yt GUI."
echo
- echo " $DEST_DIR/bin/yt"
- echo
- echo "To bootstrap a development environment for yt, run:"
- echo
- echo " $DEST_DIR/bin/yt bootstrap_dev"
- echo
- echo "Note of interest: this installation will use the directory:"
+ echo "The source for yt is located at:"
echo " $YT_DIR"
- echo "as the source for all the YT code. This means you probably shouldn't"
- echo "delete it, but on the plus side, any changes you make there are"
- echo "automatically propagated."
if [ $INST_HG -eq 1 ]
then
echo
@@ -737,6 +1025,9 @@
echo "For support, see the website and join the mailing list:"
echo
echo " http://yt-project.org/"
+ echo " http://yt-project.org/data/ (Sample data)"
+ echo " http://yt-project.org/doc/ (Docs)"
+ echo
echo " http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
echo
echo "========================================================================"
@@ -747,3 +1038,6 @@
print_afterword
print_afterword >> ${LOG_FILE}
+
+echo "yt dependencies were last updated on" > ${DEST_DIR}/.yt_update
+date >> ${DEST_DIR}/.yt_update
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 nose.cfg
--- /dev/null
+++ b/nose.cfg
@@ -0,0 +1,4 @@
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
#!python
import os, re
-from distutils import version
+from distutils.version import LooseVersion
from yt.mods import *
from yt.data_objects.data_containers import AMRData
namespace = locals().copy()
@@ -23,10 +23,12 @@
code.interact(doc, None, namespace)
sys.exit()
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+ api_version = '0.11'
else:
- api_version = '0.11'
+ api_version = '1.0'
if api_version == "0.10" and "DISPLAY" in os.environ:
from matplotlib import rcParams
@@ -42,13 +44,18 @@
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
elif api_version == "0.10":
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
- from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+ if api_version == "0.11":
+ from IPython.frontend.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ elif api_version == "1.0":
+ from IPython.terminal.interactiveshell import TerminalInteractiveShell
+ else:
+ raise RuntimeError
ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
display_banner = True)
if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
- raise RuntimeError
+
# The rest is a modified version of the IPython default profile code
@@ -77,7 +84,7 @@
ip = ip_shell.IP.getapi()
try_next = IPython.ipapi.TryNext
kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
ip = ip_shell
try_next = IPython.core.error.TryNext
kwargs = dict()
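The three-way dispatch above is the crux of the iyt changes: pick the newest API layout the installed IPython supports. A standalone sketch of the same version logic; the sample version string is illustrative:

from distutils.version import LooseVersion

def pick_api_version(ipython_version):
    # Same ordering as scripts/iyt: legacy 0.10 shells first, then the
    # 0.11-style terminal API, then the reorganized 1.0 layout.
    if LooseVersion(ipython_version) <= LooseVersion('0.10'):
        return '0.10'
    elif LooseVersion(ipython_version) <= LooseVersion('1.0'):
        return '0.11'
    return '1.0'

print pick_api_version('0.13.2')  # -> 0.11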
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 setup.cfg
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,3 +1,9 @@
[egg_info]
#tag_build = .dev
#tag_svn_revision = 1
+
+[nosetests]
+detailed-errors=1
+where=yt
+exclude=answer_testing
+with-xunit=1
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 setup.py
--- a/setup.py
+++ b/setup.py
@@ -4,11 +4,64 @@
import sys
import time
import subprocess
-import distribute_setup
-distribute_setup.use_setuptools()
+import shutil
+import glob
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+ import distribute_setup
+ distribute_setup.use_setuptools()
+from distutils.command.build_py import build_py
from numpy.distutils.misc_util import appendpath
+from numpy.distutils.command import install_data as np_install_data
from numpy.distutils import log
+from distutils import version
+
+from distutils.core import Command
+from distutils.spawn import find_executable
+
+def find_fortran_deps():
+ return (find_executable("Forthon"),
+ find_executable("gfortran"))
+
+class BuildForthon(Command):
+
+ """Command for building Forthon modules"""
+
+ description = "Build Forthon modules"
+ user_options = []
+
+ def initialize_options(self):
+
+ """init options"""
+
+ pass
+
+ def finalize_options(self):
+
+ """finalize options"""
+
+ pass
+
+ def run(self):
+
+ """runner"""
+ (Forthon_exe, gfortran_exe) = find_fortran_deps()
+ if None in (Forthon_exe, gfortran_exe):
+ sys.stderr.write(
+ "fKDpy.so won't be built due to missing Forthon/gfortran\n"
+ )
+ return
+
+ cwd = os.getcwd()
+ os.chdir(os.path.join(cwd, 'yt/utilities/kdtree'))
+ cmd = [Forthon_exe, "-F", "gfortran", "--compile_first",
+ "fKD_source", "--no2underscores", "--fopt", "'-O3'", "fKD",
+ "fKD_source.f90"]
+ subprocess.check_call(cmd, shell=False)
+ shutil.move(glob.glob('build/lib*/fKDpy.so')[0], os.getcwd())
+ os.chdir(cwd)
REASON_FILES = []
REASON_DIRS = [
@@ -34,15 +87,24 @@
files = []
for ext in ["js", "html", "css", "png", "ico", "gif"]:
files += glob.glob("%s/*.%s" % (dir_name, ext))
- REASON_FILES.append( (dir_name, files) )
+ REASON_FILES.append((dir_name, files))
# Verify that we have Cython installed
try:
import Cython
+ if version.LooseVersion(Cython.__version__) < version.LooseVersion('0.16'):
+ needs_cython = True
+ else:
+ needs_cython = False
except ImportError as e:
+ needs_cython = True
+
+if needs_cython:
print "Cython is a build-time requirement for the source tree of yt."
print "Please either install yt from a provided, release tarball,"
- print "or install Cython (version 0.15 or higher)."
+ print "or install Cython (version 0.16 or higher)."
+ print "You may be able to accomplish this by typing:"
+ print " pip install -U Cython"
sys.exit(1)
######
@@ -82,10 +144,10 @@
language=extension.language, cplus=cplus,
output_file=target_file)
cython_result = Cython.Compiler.Main.compile(source,
- options=options)
+ options=options)
if cython_result.num_errors != 0:
- raise DistutilsError("%d errors while compiling %r with Cython" \
- % (cython_result.num_errors, source))
+ raise DistutilsError("%d errors while compiling %r with Cython"
+ % (cython_result.num_errors, source))
return target_file
@@ -94,11 +156,66 @@
# End snippet
######
-import setuptools
+VERSION = "2.6dev"
-VERSION = "2.4dev"
+if os.path.exists('MANIFEST'):
+ os.remove('MANIFEST')
-if os.path.exists('MANIFEST'): os.remove('MANIFEST')
+
+def get_mercurial_changeset_id(target_dir):
+ """adapted from a script by Jason F. Harris, published at
+
+ http://jasonfharris.com/blog/2010/05/versioning-your-application-with-the-mercurial-changeset-hash/
+
+ """
+ import subprocess
+ import re
+ get_changeset = subprocess.Popen('hg identify -b -i',
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ shell=True)
+
+ if (get_changeset.stderr.read() != ""):
+ print "Error in obtaining current changeset of the Mercurial repository"
+ changeset = None
+
+ changeset = get_changeset.stdout.read().strip()
+ if (not re.search("^[0-9a-f]{12}", changeset)):
+ print "Current changeset of the Mercurial repository is malformed"
+ changeset = None
+
+ return changeset
+
+
+class my_build_src(build_src.build_src):
+ def run(self):
+ self.run_command("build_forthon")
+ build_src.build_src.run(self)
+
+
+class my_install_data(np_install_data.install_data):
+ def run(self):
+ (Forthon_exe, gfortran_exe) = find_fortran_deps()
+ if None in (Forthon_exe, gfortran_exe):
+ pass
+ else:
+ self.distribution.data_files.append(
+ ('yt/utilities/kdtree', ['yt/utilities/kdtree/fKDpy.so'])
+ )
+ np_install_data.install_data.run(self)
+
+class my_build_py(build_py):
+ def run(self):
+ # honor the --dry-run flag
+ if not self.dry_run:
+ target_dir = os.path.join(self.build_lib, 'yt')
+ src_dir = os.getcwd()
+ changeset = get_mercurial_changeset_id(src_dir)
+ self.mkpath(target_dir)
+ with open(os.path.join(target_dir, '__hg_version__.py'), 'w') as fobj:
+ fobj.write("hg_version = '%s'\n" % changeset)
+
+ build_py.run(self)
def configuration(parent_package='', top_path=None):
@@ -111,7 +228,7 @@
quiet=True)
config.make_config_py()
- #config.make_svn_version_py()
+ # config.make_svn_version_py()
config.add_subpackage('yt', 'yt')
config.add_scripts("scripts/*")
@@ -129,22 +246,26 @@
+ "simulations, focusing on Adaptive Mesh Refinement data "
"from Enzo, Orion, FLASH, and others.",
classifiers=["Development Status :: 5 - Production/Stable",
- "Environment :: Console",
- "Intended Audience :: Science/Research",
- "License :: OSI Approved :: GNU General Public License (GPL)",
- "Operating System :: MacOS :: MacOS X",
- "Operating System :: POSIX :: AIX",
- "Operating System :: POSIX :: Linux",
- "Programming Language :: C",
- "Programming Language :: Python",
- "Topic :: Scientific/Engineering :: Astronomy",
- "Topic :: Scientific/Engineering :: Physics",
- "Topic :: Scientific/Engineering :: Visualization"],
- keywords='astronomy astrophysics visualization ' + \
- 'amr adaptivemeshrefinement',
+ "Environment :: Console",
+ "Intended Audience :: Science/Research",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: POSIX :: AIX",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: C",
+ "Programming Language :: Python",
+ "Topic :: Scientific/Engineering :: Astronomy",
+ "Topic :: Scientific/Engineering :: Physics",
+ "Topic :: Scientific/Engineering :: Visualization"],
+ keywords='astronomy astrophysics visualization ' +
+ 'amr adaptivemeshrefinement',
entry_points={'console_scripts': [
- 'yt = yt.utilities.command_line:run_main',
- ]},
+ 'yt = yt.utilities.command_line:run_main',
+ ],
+ 'nose.plugins.0.10': [
+ 'answer-testing = yt.utilities.answer_testing.framework:AnswerTesting'
+ ]
+ },
author="Matthew J. Turk",
author_email="matthewturk at gmail.com",
url="http://yt-project.org/",
@@ -152,7 +273,9 @@
configuration=configuration,
zip_safe=False,
data_files=REASON_FILES,
- )
+ cmdclass={'build_py': my_build_py, 'build_forthon': BuildForthon,
+ 'build_src': my_build_src, 'install_data': my_install_data},
+ )
return
if __name__ == '__main__':
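One detail of the new build machinery worth making concrete: my_build_py stamps the current Mercurial changeset into the build tree as yt/__hg_version__.py. A hedged sketch of reading that stamp back after an install; the import path is inferred from target_dir above and should be treated as an assumption:

# Hypothetical round trip for the version stamp written by my_build_py.
try:
    from yt.__hg_version__ import hg_version
except ImportError:
    hg_version = None  # e.g. a source tree that never ran build_py
print "yt built from changeset:", hg_version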
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 tests/runall.py
--- a/tests/runall.py
+++ b/tests/runall.py
@@ -126,7 +126,9 @@
failures = 0
passes = 1
for test_name, result in sorted(rtr.passed_tests.items()):
- print "TEST %s: %s" % (test_name, result)
+ if not result:
+ print "TEST %s: %s" % (test_name, result)
+ print " %s" % rtr.test_messages[test_name]
if result: passes += 1
else: failures += 1
print "Number of passes : %s" % passes
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -81,3 +81,28 @@
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
+
+__version__ = "2.5-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+ import nose, os, sys
+ from yt.config import ytcfg
+ nose_argv = sys.argv
+ nose_argv += ['--exclude=answer_testing','--detailed-errors']
+ if verbose:
+ nose_argv.append('-v')
+ if run_answer_tests:
+ nose_argv.append('--with-answer-testing')
+ if answer_big_data:
+ nose_argv.append('--answer-big-data')
+ log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+ ytcfg.set("yt","suppressStreamLogging", 'True')
+ initial_dir = os.getcwd()
+ yt_file = os.path.abspath(__file__)
+ yt_dir = os.path.dirname(yt_file)
+ os.chdir(yt_dir)
+ try:
+ nose.run(argv=nose_argv)
+ finally:
+ os.chdir(initial_dir)
+ ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
diff -r b7e5a65fad3fac9ca9ecd1008a8ff3a285adf374 -r bc9399e08adb691e77bf90306956da957310f667 yt/analysis_modules/absorption_spectrum/absorption_line.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_line.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_line.py
@@ -23,7 +23,7 @@
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
-import numpy as na
+import numpy as np
def voigt(a,u):
"""
@@ -65,15 +65,15 @@
J. Murthy, Mar 1990 (adapted from the FORTRAN program of Armstrong)
Sep 1990 (better overflow checking)
"""
- x = na.asarray(u).astype(na.float64)
- y = na.asarray(a).astype(na.float64)
+ x = np.asarray(u).astype(np.float64)
+ y = np.asarray(a).astype(np.float64)
- w = na.array([0.462243670, 0.286675505, 0.109017206,
+ w = np.array([0.462243670, 0.286675505, 0.109017206,
0.0248105209, 0.00324377334, 0.000228338636,
7.80255648e-6, 1.08606937e-7, 4.39934099e-10,
2.22939365e-13])
- t = na.array([0.245340708, 0.737473729, 1.23407622, 1.73853771,
+ t = np.array([0.245340708, 0.737473729, 1.23407622, 1.73853771,
2.25497400, 2.78880606, 3.34785457, 3.94476404,
4.60368245, 5.38748089])
@@ -94,31 +94,31 @@
y2 = y * y
# limits are y<1., x<4 or y<1.8(x+1), x>4 (no checking performed)
- u1 = na.exp(-x * x + y2) * na.cos(2. * x * y)
+ u1 = np.exp(-x * x + y2) * np.cos(2. * x * y)
# Clenshaw's Algorithm
- bno1 = na.zeros(x.shape)
- bno2 = na.zeros(x.shape)
- x1 = na.clip((x / 5.), -na.inf, 1.)
+ bno1 = np.zeros(x.shape)
+ bno2 = np.zeros(x.shape)
+ x1 = np.clip((x / 5.), -np.inf, 1.)
coef = 4. * x1 * x1 - 2.
for i in range(33, -1, -1):
bn = coef * bno1 - bno2 + c[i]
- bno2 = na.copy(bno1)
- bno1 = na.copy(bn)
+ bno2 = np.copy(bno1)
+ bno1 = np.copy(bn)
f = x1 * (bn - bno2)
dno1 = 1. - 2. * x * f
dno2 = f
- q = na.abs(x) > 5
+ q = np.abs(x) > 5
if q.any():
- x14 = na.power(na.clip(x[q], -na.inf, 500.), 14)
- x12 = na.power(na.clip(x[q], -na.inf, 1000.), 12)
- x10 = na.power(na.clip(x[q], -na.inf, 5000.), 10)
- x8 = na.power(na.clip(x[q], -na.inf, 50000.), 8)
- x6 = na.power(na.clip(x[q], -na.inf, 1.e6), 6)
- x4 = na.power(na.clip(x[q], -na.inf, 1.e9), 4)
- x2 = na.power(na.clip(x[q], -na.inf, 1.e18), 2)
+ x14 = np.power(np.clip(x[q], -np.inf, 500.), 14)
+ x12 = np.power(np.clip(x[q], -np.inf, 1000.), 12)
+ x10 = np.power(np.clip(x[q], -np.inf, 5000.), 10)
+ x8 = np.power(np.clip(x[q], -np.inf, 50000.), 8)
+ x6 = np.power(np.clip(x[q], -np.inf, 1.e6), 6)
+ x4 = np.power(np.clip(x[q], -np.inf, 1.e9), 4)
+ x2 = np.power(np.clip(x[q], -np.inf, 1.e18), 2)
dno1[q] = -(0.5 / x2 + 0.75 / x4 + 1.875 / x6 +
6.5625 / x8 + 29.53125 / x10 +
162.4218 / x12 + 1055.7421 / x14)
@@ -135,12 +135,12 @@
if (i % 2) == 1:
q = -q
yn = yn * y2
- g = dn.astype(na.float64) * yn
+ g = dn.astype(np.float64) * yn
funct = funct + q * g
- if na.max(na.abs(g / funct)) <= 1.e-8: break
+ if np.max(np.abs(g / funct)) <= 1.e-8: break
k1 = u1 - 1.12837917 * funct
- k1 = k1.astype(na.float64).clip(0)
+ k1 = k1.astype(np.float64).clip(0)
return k1
def tau_profile(lam0, fval, gamma, vkms, column_density,
@@ -191,19 +191,19 @@
## create wavelength
if lambda_bins is None:
lambda_bins = lam1 + \
- na.arange(n_lambda, dtype=na.float) * dlambda - \
+ np.arange(n_lambda, dtype=np.float) * dlambda - \
n_lambda * dlambda / 2 # wavelength vector (angstroms)
nua = ccgs / (lambda_bins / 1.e8) # frequency vector (Hz)
## tau_0
- tau_X = na.sqrt(na.pi) * e**2 / (me * ccgs) * \
+ tau_X = np.sqrt(np.pi) * e**2 / (me * ccgs) * \
column_density * fval / vdop
tau1 = tau_X * lam1cgs
tau0 = tau_X * lam0cgs
# dimensionless frequency offset in units of doppler freq
x = (nua - nu1) / nudop
- a = gamma / (4 * na.pi * nudop) # damping parameter
+ a = gamma / (4 * np.pi * nudop) # damping parameter
phi = voigt(a, x) # profile
tauphi = tau0 * phi # profile scaled with tau0
This diff is so big that we needed to truncate the remainder.
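The run_nose hook added to yt/__init__.py above gives a programmatic entry point into the test suite. A minimal usage sketch, with the keyword names exactly as defined in that hunk:

import yt

yt.run_nose(verbose=True)           # unit tests, verbose output
yt.run_nose(run_answer_tests=True)  # also enable the answer tests
yt.run_nose(run_answer_tests=True, answer_big_data=True)  # big-data answers too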
https://bitbucket.org/yt_analysis/yt-3.0/commits/7cf87ba3a876/
Changeset: 7cf87ba3a876
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 20:29:41
Summary: Merging from the rockstar_work bookmark, to begin finishing Rockstar in 3.0.
Affected #: 1 file
diff -r f261b59f1b6ce921f97b037ee59482473c18fa5e -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -26,9 +26,27 @@
np.int64_t id
float pos[6]
+ctypedef struct particleflat:
+ np.int64_t id
+ float pos_x
+ float pos_y
+ float pos_z
+ float vel_x
+ float vel_y
+ float vel_z
+
+cdef import from "halo.h":
+ struct halo:
+ np.int64_t id
+ float pos[6], corevel[3], bulkvel[3]
+ float m, r, child_r, mgrav, vmax, rvmax, rs, vrms, J[3], energy, spin
+ np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+ float min_pos_err, min_vel_err, min_bulkvel_err
+
cdef import from "io_generic.h":
ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p)
- void set_load_particles_generic(LPG func)
+ ctypedef void (*AHG) (halo *h, particle *hp)
+ void set_load_particles_generic(LPG func, AHG afunc)
cdef import from "rockstar.h":
void rockstar(float *bounds, np.int64_t manual_subs)
@@ -139,7 +157,15 @@
# Forward declare
cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
+cdef void rh_analyze_halo(halo *h, particle *hp):
+ cdef particleflat[:] pslice
+ pslice = <particleflat[:h.num_p]> (<particleflat *>hp)
+ parray = np.asarray(pslice)
+ for cb in rh.callbacks:
+ cb(rh.pf, parray)
+ # This is where we call our functions
+
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
global SCALE_NOW
cdef np.float64_t conv[6], left_edge[6]
cdef np.ndarray[np.int64_t, ndim=1] arri
@@ -236,6 +262,7 @@
cdef public int total_particles
cdef public int dm_only
cdef public int hires_only
+ cdef public object callbacks
def __cinit__(self, ts):
self.ts = ts
@@ -250,7 +277,8 @@
int writing_port = -1, int block_ratio = 1,
int periodic = 1, force_res=None,
int min_halo_size = 25, outbase = "None",
- int dm_only = 0, int hires_only = False):
+ int dm_only = 0, int hires_only = False,
+ callbacks = None):
global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -279,7 +307,6 @@
NUM_WRITERS = num_writers
NUM_BLOCKS = num_readers
MIN_HALO_OUTPUT_SIZE=min_halo_size
- TOTAL_PARTICLES = total_particles
self.block_ratio = block_ratio
self.dm_only = dm_only
self.hires_only = hires_only
@@ -289,6 +316,8 @@
Ol = tpf.omega_lambda
Om = tpf.omega_matter
SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
+ if callbacks is None: callbacks = []
+ self.callbacks = callbacks
if not outbase =='None'.decode('UTF-8'):
#output directory. since we can't change the output filenames
#workaround is to make a new directory
@@ -300,9 +329,9 @@
tpf.domain_left_edge[0]) * tpf['mpchcm']
setup_config()
rh = self
- rh.dm_type = dm_type
cdef LPG func = rh_read_particles
- set_load_particles_generic(func)
+ cdef AHG afunc = rh_analyze_halo
+ set_load_particles_generic(func, afunc)
def call_rockstar(self):
read_particles("generic")
@@ -320,3 +349,4 @@
def start_writer(self):
cdef np.int64_t in_type = np.int64(WRITER_TYPE)
client(in_type)
+
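Given the rh_analyze_halo signature above, a per-halo callback receives the parameter file and a structured particle array whose field names mirror the particleflat ctypedef. A hedged sketch of one such callback; the function name is invented here and the setup call is abbreviated:

import numpy as np

def print_halo_center(pf, parray):
    # parray is the np.asarray view built in rh_analyze_halo; pos_x,
    # pos_y and pos_z are the particleflat struct members.
    center = [float(np.mean(parray[ax])) for ax in ("pos_x", "pos_y", "pos_z")]
    print "halo center (unweighted):", center

# Registered through the new keyword (other arguments omitted):
# rh.setup_rockstar(..., callbacks=[print_halo_center])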
https://bitbucket.org/yt_analysis/yt-3.0/commits/84b917f54846/
Changeset: 84b917f54846
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-30 07:30:52
Summary: fixing mass fields
Affected #: 5 files
diff -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 -r 84b917f54846abdf9a8e1832868bca72ca339e41 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -190,7 +190,7 @@
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
+ print "local_parts", local_parts
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
@@ -201,8 +201,12 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
+ if "grids" in dir(pf.h):
+ sources = pf.h._get_objs("grids")
+ else:
+ sources = [pf.h.all_data()]
+ for g in sources:
+ if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
@@ -233,6 +237,7 @@
fi += 1
pi += npart
num_p[0] = local_parts
+ print "finished read"
cdef class RockstarInterface:
diff -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 -r 84b917f54846abdf9a8e1832868bca72ca339e41 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -234,6 +234,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
diff -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 -r 84b917f54846abdf9a8e1832868bca72ca339e41 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -244,17 +244,36 @@
j_mag = [amx.sum(), amy.sum(), amz.sum()]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
diff -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 -r 84b917f54846abdf9a8e1832868bca72ca339e41 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -734,17 +734,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -873,6 +873,60 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return sphrt
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(pos, theta, phi, normal)
+ return sphrp
+
+add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
def _ParticleRadialVelocity(field, data):
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
diff -r 4010d0e4f63ac8ad15c8655118dc9ac3c8822b72 -r 84b917f54846abdf9a8e1832868bca72ca339e41 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -51,6 +51,8 @@
ARTFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
add_field = ARTFieldInfo.add_field
+_ptypes = ["all", "darkmatter", "stars", "specie0"]
+
for f in fluid_fields:
add_art_field(f, function=NullFunc, take_log=True,
validators=[ValidateDataField(f)])
@@ -224,7 +226,6 @@
particle_type=True,
convert_function=lambda x: x.convert("particle_mass"))
-
def _particle_age(field, data):
tr = data["particle_creation_time"]
return data.pf.current_time - tr
@@ -269,8 +270,6 @@
take_log=True, units=r"\rm{Msun}")
# Particle Deposition Fields
-_ptypes = ["all", "darkmatter", "stars", "specie0"]
-
for _ptype in _ptypes:
particle_vector_functions(_ptype, ["particle_position_%s" % ax for ax in 'xyz'],
["particle_velocity_%s" % ax for ax in 'xyz'],
@@ -293,6 +292,19 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "stars_density"]
+ rho += data["gas", "Density"]
+ return rho * data["CellVolume"]
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -306,6 +318,19 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "baryon_density"]
+ rho += data["deposit", "specie0_density"]
+ return rho * data["CellVolume"]
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
@@ -319,3 +344,16 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def multimass_mass(field, data):
+ rho = data["deposit", "baryon_density"]
+ rho += data["deposit", "darkmatter_density"]
+ return rho * data["CellVolume"]
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
https://bitbucket.org/yt_analysis/yt-3.0/commits/d53ed3e653f2/
Changeset: d53ed3e653f2
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-15 06:19:08
Summary: bandaid for resolving the ftype when we don't want to use the last fluid type as the default
Affected #: 1 file
diff -r 064dc07441c86efd0a32d616b013fdd77a7a88d1 -r d53ed3e653f2ff041a12ecfb17dff458c52e7a31 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -407,6 +407,8 @@
ftype = self._current_particle_type
else:
ftype = self._current_fluid_type
+ if (ftype, fname) not in self.pf.field_info:
+ ftype = "gas"
if finfo.particle_type and ftype not in self.pf.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.pf.fluid_types:
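The bandaid amounts to one extra lookup: if the resolved fluid type does not pair with the field name in field_info, fall back to the generic "gas" type. A standalone sketch of that rule; the dictionary contents are illustrative only:

def resolve_ftype(field_info, ftype, fname):
    # Mirrors the two added lines: keep the current fluid type unless
    # the (ftype, fname) pair is unknown, then retry under "gas".
    if (ftype, fname) not in field_info:
        ftype = "gas"
    return ftype

field_info = {("gas", "Density"): None}
print resolve_ftype(field_info, "deposit", "Density")  # -> gas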
https://bitbucket.org/yt_analysis/yt-3.0/commits/d56bd1ebfab2/
Changeset: d56bd1ebfab2
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-15 06:19:28
Summary: adding mass fields for each density field
Affected #: 1 file
diff -r d53ed3e653f2ff041a12ecfb17dff458c52e7a31 -r d56bd1ebfab2fda331724f8c92605f5f885acb6b yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -293,6 +293,19 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "stars_mass"]
+ rho += data["gas", "CellMass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -306,6 +319,32 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "baryon_mass"]
+ rho += data["deposit", "specie0_mass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
+def multimass_mass(field, data):
+ rho = data["deposit", "baryon_mass"]
+ rho += data["deposit", "darkmatter_mass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
https://bitbucket.org/yt_analysis/yt-3.0/commits/ba2ad90a561e/
Changeset: ba2ad90a561e
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-30 07:37:16
Summary: Merge
Affected #: 6 files
diff -r d56bd1ebfab2fda331724f8c92605f5f885acb6b -r ba2ad90a561e460a5f68dd2438025237b812ffc2 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -190,7 +190,7 @@
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
+ print "local_parts", local_parts
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
@@ -201,8 +201,12 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
+ if "grids" in dir(pf.h):
+ sources = pf.h._get_objs("grids")
+ else:
+ sources = [pf.h.all_data()]
+ for g in sources:
+ if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
@@ -233,6 +237,7 @@
fi += 1
pi += npart
num_p[0] = local_parts
+ print "finished read"
cdef class RockstarInterface:
diff -r d56bd1ebfab2fda331724f8c92605f5f885acb6b -r ba2ad90a561e460a5f68dd2438025237b812ffc2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -234,6 +234,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
diff -r d56bd1ebfab2fda331724f8c92605f5f885acb6b -r ba2ad90a561e460a5f68dd2438025237b812ffc2 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -244,17 +244,36 @@
j_mag = [amx.sum(), amy.sum(), amz.sum()]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
diff -r d56bd1ebfab2fda331724f8c92605f5f885acb6b -r ba2ad90a561e460a5f68dd2438025237b812ffc2 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -264,6 +264,8 @@
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo
if fname == self._last_freq[1]:
+ mylog.debug("Guessing field %s is (%s, %s)", fname,
+ self._last_freq[0], self._last_freq[1])
return self._last_finfo
if fname in self.field_info:
self._last_freq = field
@@ -274,6 +276,8 @@
if guessing_type and ("all", fname) in self.field_info:
self._last_freq = ("all", fname)
self._last_finfo = self.field_info["all", fname]
+ mylog.debug("Guessing field %s is (%s, %s)", fname,
+ "all", fname)
return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
diff -r d56bd1ebfab2fda331724f8c92605f5f885acb6b -r ba2ad90a561e460a5f68dd2438025237b812ffc2 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -5,6 +5,8 @@
Author: Matthew Turk <matthewturk at gmail.com>
Affiliation: KIPAC/SLAC/Stanford
+Author: Chris Moody <chrisemoody at gmail.com>
+Affiliation: UCSC
Homepage: http://yt-project.org/
License:
Copyright (C) 2008-2011 Matthew Turk. All Rights Reserved.
@@ -732,17 +734,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -871,6 +873,120 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return sphrt
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(pos, theta, phi, normal)
+ return sphrp
+
+add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleRadialVelocity(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ vel = "particle_velocity_%s"
+ vel = np.array([data[vel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphr = get_sph_r_component(vel, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadialVelocity", function=_ParticleRadialVelocity,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaVelocity(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ vel = "particle_velocity_%s"
+ vel = np.array([data[vel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ spht = get_sph_theta_component(vel, theta, phi, normal)
+ return sphrt
+
+add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiVelocity(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ vel = "particle_velocity_%s"
+ vel = np.array([data[vel % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(vel, theta, phi, normal)
+ return sphrp
+
+add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
def _TangentialVelocity(field, data):
return np.sqrt(data["VelocityMagnitude"]**2.0
- data["RadialVelocity"]**2.0)
https://bitbucket.org/yt_analysis/yt-3.0/commits/e80deb0f1b32/
Changeset: e80deb0f1b32
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-30 07:38:09
Summary: remove prints from R*
Affected #: 1 file
diff -r ba2ad90a561e460a5f68dd2438025237b812ffc2 -r e80deb0f1b3231dcb2783a92d828cd89a7134a98 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -190,8 +190,6 @@
else:
local_parts = TOTAL_PARTICLES
- print "local_parts", local_parts
-
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -237,7 +235,6 @@
fi += 1
pi += npart
num_p[0] = local_parts
- print "finished read"
cdef class RockstarInterface:
https://bitbucket.org/yt_analysis/yt-3.0/commits/197f7ea838cf/
Changeset: 197f7ea838cf
Branch: yt-3.0
User: juxtaposicion
Date: 2013-06-30 21:33:06
Summary: added half mass quantity
Affected #: 1 file
diff -r e80deb0f1b3231dcb2783a92d828cd89a7134a98 -r 197f7ea838cfde217a3dd73461aab9612e285fdd yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -742,3 +742,25 @@
add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
combine_function=_combParticleDensityCenter,n_ret=2)
+
+def _HalfMass(data, field):
+ """
+ Cumulative sum the given mass field and find
+ at what radius the half mass is. Simple but
+ memory-expensive method.
+ """
+ d = data[field]
+ r = data['Radius']
+ return d, r
+
+def _combHalfMass(data, field_vals, radii):
+ fv = np.concatenate(field_vals).ravel()
+ r = np.concatenate(radii).ravel()
+ idx = np.argsort(r)
+ r = r[idx]
+ fv = np.cumsum(fv[idx])
+ idx = np.where(fv / fv[-1] > fv[1] / 2.0)[0][0]
+ return r[idx]
+
+add_quantity("HalfMass",function=_HalfMass,
+ combine_function=_combHalfMass,n_ret=2)
https://bitbucket.org/yt_analysis/yt-3.0/commits/6e8e3838765b/
Changeset: 6e8e3838765b
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 20:50:34
Summary: fixing half mass quantity
Affected #: 1 file
diff -r 197f7ea838cfde217a3dd73461aab9612e285fdd -r 6e8e3838765b7758e4189e01e9ef5370d470a165 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -749,17 +749,17 @@
at what radius the half mass is. Simple but
memory-expensive method.
"""
- d = data[field]
+ d = np.nan_to_num(data[field])
r = data['Radius']
return d, r
-def _combHalfMass(data, field_vals, radii):
- fv = np.concatenate(field_vals).ravel()
- r = np.concatenate(radii).ravel()
+def _combHalfMass(data, field_vals, radii, frac=0.5):
+ fv = np.concatenate(field_vals.tolist()).ravel()
+ r = np.concatenate(radii.tolist()).ravel()
idx = np.argsort(r)
r = r[idx]
fv = np.cumsum(fv[idx])
- idx = np.where(fv / fv[-1] > fv[1] / 2.0)[0][0]
+ idx = np.where(fv / fv[-1] > frac)[0][0]
return r[idx]
add_quantity("HalfMass",function=_HalfMass,
https://bitbucket.org/yt_analysis/yt-3.0/commits/39e94be3942d/
Changeset: 39e94be3942d
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 21:00:07
Summary: Merged with octdiet
Affected #: 5 files
diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r 39e94be3942d7b7b4490e86256628ab873ee07b2 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -190,8 +190,6 @@
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
-
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -201,8 +199,12 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
+ if "grids" in dir(pf.h):
+ sources = pf.h._get_objs("grids")
+ else:
+ sources = [pf.h.all_data()]
+ for g in sources:
+ if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r 39e94be3942d7b7b4490e86256628ab873ee07b2 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -234,6 +234,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
@@ -397,6 +398,8 @@
ftype = self._current_particle_type
else:
ftype = self._current_fluid_type
+ if (ftype, fname) not in self.pf.field_info:
+ ftype = "gas"
if finfo.particle_type and ftype not in self.pf.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.pf.fluid_types:
diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r 39e94be3942d7b7b4490e86256628ab873ee07b2 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -244,17 +244,36 @@
j_mag = [amx.sum(), amy.sum(), amz.sum()]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
@@ -723,3 +742,25 @@
add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
combine_function=_combParticleDensityCenter,n_ret=2)
+
+def _HalfMass(data, field):
+ """
+ Cumulative sum the given mass field and find
+ at what radius the half mass is. Simple but
+ memory-expensive method.
+ """
+ d = np.nan_to_num(data[field])
+ r = data['Radius']
+ return d, r
+
+def _combHalfMass(data, field_vals, radii, frac=0.5):
+ fv = np.concatenate(field_vals.tolist()).ravel()
+ r = np.concatenate(radii.tolist()).ravel()
+ idx = np.argsort(r)
+ r = r[idx]
+ fv = np.cumsum(fv[idx])
+ idx = np.where(fv / fv[-1] > frac)[0][0]
+ return r[idx]
+
+add_quantity("HalfMass",function=_HalfMass,
+ combine_function=_combHalfMass,n_ret=2)
diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r 39e94be3942d7b7b4490e86256628ab873ee07b2 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -737,17 +737,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -876,6 +876,60 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return sphrt
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos.copy(), center)
+ phi = get_sph_phi(pos.copy(), center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(pos, theta, phi, normal)
+ return sphrp
+
+add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
def _ParticleRadialVelocity(field, data):
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
diff -r 92a91071ac6b84e1008646de7f2210e6841db9f0 -r 39e94be3942d7b7b4490e86256628ab873ee07b2 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -293,6 +293,19 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "stars_mass"]
+ rho += data["gas", "CellMass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -306,6 +319,32 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "baryon_mass"]
+ rho += data["deposit", "specie0_mass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
+def multimass_mass(field, data):
+ rho = data["deposit", "baryon_mass"]
+ rho += data["deposit", "darkmatter_mass"]
+ return rho
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
https://bitbucket.org/yt_analysis/yt-3.0/commits/116d3c21f62f/
Changeset: 116d3c21f62f
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 21:44:29
Summary: mass fields now depend on density
Affected #: 1 file
diff -r 39e94be3942d7b7b4490e86256628ab873ee07b2 -r 116d3c21f62f5e8e50ef7ed0a136da59aec68bc0 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -294,9 +294,8 @@
projection_conversion = 'cm')
def baryon_mass(field, data):
- rho = data["deposit", "stars_mass"]
- rho += data["gas", "CellMass"]
- return rho
+ rho = data["deposit", "baryon_density"]
+ return rho * data['CellVolume']
ARTFieldInfo.add_field(("deposit", "baryon_mass"),
function = baryon_mass,
@@ -320,9 +319,8 @@
projection_conversion = 'cm')
def total_mass(field, data):
- rho = data["deposit", "baryon_mass"]
- rho += data["deposit", "specie0_mass"]
- return rho
+ rho = data["deposit", "total_density"]
+ return rho * data['CellVolume']
ARTFieldInfo.add_field(("deposit", "total_mass"),
function = total_mass,
@@ -332,19 +330,6 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
-def multimass_mass(field, data):
- rho = data["deposit", "baryon_mass"]
- rho += data["deposit", "darkmatter_mass"]
- return rho
-
-ARTFieldInfo.add_field(("deposit", "multimass_mass"),
- function = multimass_mass,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{Multimass Mass}",
- units = r"\mathrm{g}/\mathrm{cm}^{3}",
- projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
- projection_conversion = 'cm')
-
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
@@ -358,3 +343,15 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def multimass_mass(field, data):
+ rho = data["deposit", "multimass_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
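Deriving each mass field from its density counterpart keeps the two consistent by construction. A sketch of the invariant this buys, assuming a loaded parameter file pf:

    import numpy as np
    ad = pf.h.all_data()
    # baryon_mass is now literally baryon_density integrated over the cell:
    assert np.allclose(ad["deposit", "baryon_mass"],
                       ad["deposit", "baryon_density"] * ad["CellVolume"])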
https://bitbucket.org/yt_analysis/yt-3.0/commits/2e32a398829f/
Changeset: 2e32a398829f
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 22:27:13
Summary: Removed the field-guessing debug log; it printed confusing information
Affected #: 1 file
diff -r 116d3c21f62f5e8e50ef7ed0a136da59aec68bc0 -r 2e32a398829fa012d6e9a206c17d316795dbc17a yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -264,8 +264,6 @@
self._last_finfo = self.field_info[(ftype, fname)]
return self._last_finfo
if fname == self._last_freq[1]:
- mylog.debug("Guessing field %s is (%s, %s)", fname,
- self._last_freq[0], self._last_freq[1])
return self._last_finfo
if fname in self.field_info:
self._last_freq = field
https://bitbucket.org/yt_analysis/yt-3.0/commits/aee9f8330af4/
Changeset: aee9f8330af4
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 22:38:40
Summary: Added field guessing in the right place
Affected #: 1 file
diff -r 2e32a398829fa012d6e9a206c17d316795dbc17a -r aee9f8330af4ed864208250712d33eed2420f798 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -273,9 +273,11 @@
if hasattr(self.pf, "field_info"):
if not isinstance(item, tuple):
field = ("unknown", item)
+ finfo = self.pf._get_field_info(*field)
+ mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
- finfo = self.pf._get_field_info(*field)
+ finfo = self.pf._get_field_info(*field)
else:
FI = getattr(self.pf, "field_info", FieldInfo)
if item in FI:
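With the guess moved here, only untyped lookups pay for (and log) the guessing step. Roughly, assuming a loaded parameter file pf whose dataset carries a gas Density field:

    ad = pf.h.all_data()
    rho = ad["Density"]          # untyped: resolved via ("unknown", "Density"), guess logged
    rho = ad["gas", "Density"]   # fully qualified: no guessing, no log line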
https://bitbucket.org/yt_analysis/yt-3.0/commits/b6bc3cc590e8/
Changeset: b6bc3cc590e8
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 23:08:48
Summary: Fixing typos in the spherical-coordinate particle fields
Affected #: 1 file
diff -r aee9f8330af4ed864208250712d33eed2420f798 -r b6bc3cc590e8e2dc5e8d62c3b87add02764485e8 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -882,10 +882,9 @@
bv = data.get_field_parameter("bulk_velocity")
pos = "particle_position_%s"
pos = np.array([data[pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
- vel = vel - np.reshape(bv, (3, 1))
sphr = get_sph_r_component(pos, theta, phi, normal)
return sphr
@@ -900,12 +899,11 @@
bv = data.get_field_parameter("bulk_velocity")
pos = "particle_position_%s"
pos = np.array([data[pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
- vel = vel - np.reshape(bv, (3, 1))
spht = get_sph_theta_component(pos, theta, phi, normal)
- return sphrt
+ return spht
add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
particle_type=True, units=r"\rm{cm}/\rm{s}",
@@ -918,12 +916,12 @@
bv = data.get_field_parameter("bulk_velocity")
pos = "particle_position_%s"
pos = np.array([data[pos % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphp = get_sph_phi_component(pos, theta, phi, normal)
- return sphrp
+ return sphp
add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
particle_type=True, units=r"\rm{cm}/\rm{s}",
@@ -938,8 +936,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -958,12 +956,12 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
spht = get_sph_theta_component(vel, theta, phi, normal)
- return sphrt
+ return spht
add_field("ParticleThetaVelocity", function=_ParticleThetaVelocity,
particle_type=True, units=r"\rm{cm}/\rm{s}",
@@ -978,12 +976,12 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphp = get_sph_phi_component(vel, theta, phi, normal)
- return sphrp
+ return sphp
add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
particle_type=True, units=r"\rm{cm}/\rm{s}",
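The pattern all of these fields follow is the same: shift positions and velocities into the halo frame, then project onto the spherical unit vectors built from theta and phi. A self-contained NumPy sketch of the radial case (not the yt helpers themselves):

    import numpy as np

    def particle_radial_velocity(pos, vel, center, bulk_velocity):
        # pos, vel: shape (3, N); center, bulk_velocity: shape (3,)
        rel = pos - np.reshape(center, (3, 1))
        v = vel - np.reshape(bulk_velocity, (3, 1))
        rhat = rel / np.sqrt((rel ** 2).sum(axis=0))  # radial unit vectors
        return (v * rhat).sum(axis=0)                 # v . rhat, per particle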
https://bitbucket.org/yt_analysis/yt-3.0/commits/06a8233252c2/
Changeset: 06a8233252c2
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 23:46:56
Summary: Merged yt_analysis/yt-3.0 into yt-3.0
Affected #: 1 file
diff -r b6bc3cc590e8e2dc5e8d62c3b87add02764485e8 -r 06a8233252c27d569d9c47d8178ca34fee09a844 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -60,6 +60,7 @@
geometry = "cartesian"
coordinates = None
max_level = 99
+ storage_filename = None
class __metaclass__(type):
def __init__(cls, name, b, d):
https://bitbucket.org/yt_analysis/yt-3.0/commits/cb34a01209ef/
Changeset: cb34a01209ef
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 23:16:58
Summary: Added a border case to the half-mass calculation for when the total mass is zero
Affected #: 1 file
diff -r b6bc3cc590e8e2dc5e8d62c3b87add02764485e8 -r cb34a01209effa37e40004d9bae1986563c4d965 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -759,8 +759,11 @@
idx = np.argsort(r)
r = r[idx]
fv = np.cumsum(fv[idx])
- idx = np.where(fv / fv[-1] > frac)[0][0]
- return r[idx]
+ idx, = np.where(fv / fv[-1] > frac)
+ if len(idx) > 0:
+ return r[idx[0]]
+ else:
+ return np.nan
add_quantity("HalfMass",function=_HalfMass,
combine_function=_combHalfMass,n_ret=2)
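The guard matters because np.where can legitimately come back empty; with zero total mass the ratio is all-NaN and no element ever exceeds frac. A condensed sketch of the fixed logic:

    import numpy as np

    def half_mass_radius(r, m, frac=0.5):
        order = np.argsort(r)
        r, fv = r[order], np.cumsum(m[order])
        idx, = np.where(fv / fv[-1] > frac)
        # An empty idx would make r[idx[0]] raise IndexError; return NaN instead.
        return r[idx[0]] if len(idx) > 0 else np.nan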
https://bitbucket.org/yt_analysis/yt-3.0/commits/9ee107d400f8/
Changeset: 9ee107d400f8
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-01 23:47:54
Summary: Merge
Affected #: 1 file
diff -r cb34a01209effa37e40004d9bae1986563c4d965 -r 9ee107d400f8734cdd60469432f55f1a737991d3 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -60,6 +60,7 @@
geometry = "cartesian"
coordinates = None
max_level = 99
+ storage_filename = None
class __metaclass__(type):
def __init__(cls, name, b, d):
https://bitbucket.org/yt_analysis/yt-3.0/commits/9da7774e9b4f/
Changeset: 9da7774e9b4f
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-02 20:20:24
Summary: Merged yt_analysis/yt-3.0 into yt-3.0
Affected #: 12 files
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -286,7 +286,8 @@
# This needs to be parallel_objects-ified
for chunk in parallel_objects(self.data_source.chunks(
chunk_fields, "io")):
- mylog.debug("Adding chunk (%s) to tree", chunk.ires.size)
+ mylog.debug("Adding chunk (%s) to tree (%0.3e GB RAM)", chunk.ires.size,
+ get_memory_usage()/1024.)
self._handle_chunk(chunk, fields, tree)
# Note that this will briefly double RAM usage
if self.proj_style == "mip":
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -247,7 +247,7 @@
rv = np.empty(self.ires.size, dtype="float64")
ind = 0
if ngz == 0:
- for io_chunk in self.chunks([], "io"):
+ for io_chunk in self.chunks([], "io", cache = False):
for i,chunk in enumerate(self.chunks(field, "spatial", ngz = 0)):
ind += self._current_chunk.objs[0].select(
self.selector, self[field], rv, ind)
@@ -280,8 +280,8 @@
size = self._count_particles(ftype)
rv = np.empty(size, dtype="float64")
ind = 0
- for io_chunk in self.chunks([], "io"):
- for i,chunk in enumerate(self.chunks(field, "spatial")):
+ for io_chunk in self.chunks([], "io", cache = False):
+ for i, chunk in enumerate(self.chunks(field, "spatial")):
x, y, z = (self[ftype, 'particle_position_%s' % ax]
for ax in 'xyz')
if x.size == 0: continue
@@ -302,7 +302,7 @@
if f1 == ftype:
return val.size
size = 0
- for io_chunk in self.chunks([], "io"):
+ for io_chunk in self.chunks([], "io", cache = False):
for i,chunk in enumerate(self.chunks([], "spatial")):
x, y, z = (self[ftype, 'particle_position_%s' % ax]
for ax in 'xyz')
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/art/data_structures.py
--- a/yt/frontends/art/data_structures.py
+++ b/yt/frontends/art/data_structures.py
@@ -180,7 +180,7 @@
g = og
yield YTDataChunk(dobj, "spatial", [g], None)
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
"""
Since subsets are calculated per domain,
i.e. per file, yield each domain at a time to
@@ -189,7 +189,8 @@
"""
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
- yield YTDataChunk(dobj, "io", [subset], None)
+ yield YTDataChunk(dobj, "io", [subset], None,
+ cache = cache)
class ARTStaticOutput(StaticOutput):
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -232,11 +232,12 @@
def _chunk_spatial(self, dobj, ngz):
raise NotImplementedError
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
# _current_chunk is made from identify_base_chunk
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for chunk in oobjs:
- yield YTDataChunk(dobj, "io", [chunk], self._data_size)
+ yield YTDataChunk(dobj, "io", [chunk], self._data_size,
+ cache = cache)
def _read_fluid_fields(self, fields, dobj, chunk=None):
if len(fields) == 0:
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/enzo/fields.py
--- a/yt/frontends/enzo/fields.py
+++ b/yt/frontends/enzo/fields.py
@@ -388,14 +388,16 @@
"""
particle_field = field.name[4:]
pos = data[('all', 'Coordinates')]
+ # Get back into density
+ pden = data['all', 'particle_mass'] / data["CellVolume"]
top = data.deposit(
pos,
- [data[('all', particle_field)]*data[('all', 'particle_mass')]],
+ [data[('all', particle_field)]*pden],
method = 'cic'
)
bottom = data.deposit(
pos,
- [data[('all', 'particle_mass')]],
+ [pden],
method = 'cic'
)
top[bottom == 0] = 0.0
@@ -513,7 +515,22 @@
add_enzo_field(pf, function=NullFunc,
validators = [ValidateDataField(pf)],
particle_type=True)
-add_field(('all', "particle_mass"), function=NullFunc, particle_type=True)
+
+def _convertParticleMass(data):
+ return data.convert("Density")*(data.convert("cm")**3.0)
+def _convertParticleMassMsun(data):
+ return data.convert("Density")*((data.convert("cm")**3.0)/mass_sun_cgs)
+# We have now multiplied by grid.dds.prod() inside the IO function.
+# So here we multiply just by the conversion to density.
+add_field(('all', "particle_mass"), function=NullFunc,
+ particle_type=True, convert_function = _convertParticleMass)
+
+add_field("ParticleMass",
+ function=TranslationFunc("particle_mass"),
+ particle_type=True, convert_function=_convertParticleMass)
+add_field("ParticleMassMsun",
+ function=TranslationFunc("particle_mass"),
+ particle_type=True, convert_function=_convertParticleMassMsun)
def _ParticleAge(field, data):
current_time = data.pf.current_time
@@ -524,32 +541,6 @@
validators=[ValidateDataField("creation_time")],
particle_type=True, convert_function=_convertParticleAge)
-def _ParticleMass(field, data):
- particles = data['all', "particle_mass"].astype('float64') * \
- just_one(data["CellVolumeCode"].ravel())
- # Note that we mandate grid-type here, so this is okay
- return particles
-
-def _convertParticleMass(data):
- return data.convert("Density")*(data.convert("cm")**3.0)
-def _IOLevelParticleMass(grid):
- dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
- cf = (_ParticleMass(None, dd) * _convertParticleMass(grid))[0]
- return cf
-def _convertParticleMassMsun(data):
- return data.convert("Density")*((data.convert("cm")**3.0)/1.989e33)
-def _IOLevelParticleMassMsun(grid):
- dd = dict(particle_mass = np.ones(1), CellVolumeCode=grid["CellVolumeCode"])
- cf = (_ParticleMass(None, dd) * _convertParticleMassMsun(grid))[0]
- return cf
-add_field("ParticleMass",
- function=_ParticleMass, validators=[ValidateSpatial(0)],
- particle_type=True, convert_function=_convertParticleMass,
- particle_convert_function=_IOLevelParticleMass)
-add_field("ParticleMassMsun",
- function=_ParticleMass, validators=[ValidateSpatial(0)],
- particle_type=True, convert_function=_convertParticleMassMsun,
- particle_convert_function=_IOLevelParticleMassMsun)
#
# Now we do overrides for 2D fields
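The net effect is a division of labor: io.py turns the on-disk value (a density in code units) into a code-units mass by multiplying by the cell volume, and the convert_function then applies only the density-to-cgs factor. A sketch with stand-in numbers (all values hypothetical):

    import numpy as np

    gdata = np.array([1.0, 2.0])        # on-disk particle "mass" (a code-units density)
    dds = np.array([0.25, 0.25, 0.25])  # cell widths in code units
    convert_density = 2.0e-29           # stand-in for data.convert("Density")
    convert_cm = 3.0e24                 # stand-in for data.convert("cm")

    code_mass = gdata * dds.prod()                           # applied in io.py at read time
    mass_cgs = code_mass * convert_density * convert_cm**3   # applied by _convertParticleMass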
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,6 +36,8 @@
import numpy as np
from yt.funcs import *
+_convert_mass = ("particle_mass",)
+
class IOHandlerPackedHDF5(BaseIOHandler):
_data_style = "enzo_packed_3d"
@@ -81,6 +83,8 @@
for field in set(fields):
ftype, fname = field
gdata = data[g.id].pop(fname)[mask]
+ if fname == "particle_mass":
+ gdata *= g.dds.prod()
rv[field][ind:ind+gdata.size] = gdata
ind += gdata.size
data.pop(g.id)
@@ -130,6 +134,8 @@
for field in set(fields):
ftype, fname = field
gdata = data[g.id].pop(fname)[mask]
+ if fname == "particle_mass":
+ gdata *= g.dds.prod()
rv[field][ind:ind+gdata.size] = gdata
ind += gdata.size
return rv
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,12 +212,13 @@
def _setup_data_io(self):
self.io = io_registry[self.data_style](self.parameter_file)
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
# We'll take the max of 128 and the number of processors
nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
for gs in list_chunks(gobjs, nl):
- yield YTDataChunk(dobj, "io", gs, self._count_selection)
+ yield YTDataChunk(dobj, "io", gs, self._count_selection,
+ cache = cache)
class FLASHStaticOutput(StaticOutput):
_hierarchy_class = FLASHHierarchy
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -348,10 +348,10 @@
g = og
yield YTDataChunk(dobj, "spatial", [g], None)
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
- yield YTDataChunk(dobj, "io", [subset], None)
+ yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
class RAMSESStaticOutput(StaticOutput):
_hierarchy_class = RAMSESGeometryHandler
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -403,8 +403,9 @@
if len(fields_to_read) == 0:
return {}, fields_to_generate
fields_to_return = self.io._read_particle_selection(
- self._chunk_io(dobj), selector,
- fields_to_read)
+ self._chunk_io(dobj, cache = False),
+ selector,
+ fields_to_read)
for field in fields_to_read:
ftype, fname = field
finfo = self.pf._get_field_info(*field)
@@ -425,10 +426,11 @@
fields_to_read, fields_to_generate = self._split_fields(fields)
if len(fields_to_read) == 0:
return {}, fields_to_generate
- fields_to_return = self.io._read_fluid_selection(self._chunk_io(dobj),
- selector,
- fields_to_read,
- chunk_size)
+ fields_to_return = self.io._read_fluid_selection(
+ self._chunk_io(dobj, cache = False),
+ selector,
+ fields_to_read,
+ chunk_size)
for field in fields_to_read:
ftype, fname = field
conv_factor = self.pf.field_info[fname]._convert_function(self)
@@ -453,20 +455,30 @@
else:
raise NotImplementedError
+def cached_property(func):
+ n = '_%s' % func.func_name
+ def cached_func(self):
+ if self._cache and getattr(self, n, None) is not None:
+ return getattr(self, n)
+ if self.data_size is None:
+ tr = self._accumulate_values(n[1:])
+ else:
+ tr = func(self)
+ if self._cache:
+ setattr(self, n, tr)
+ return tr
+ return property(cached_func)
+
class YTDataChunk(object):
- def __init__(self, dobj, chunk_type, objs, data_size = None, field_type = None):
+ def __init__(self, dobj, chunk_type, objs, data_size = None,
+ field_type = None, cache = False):
self.dobj = dobj
self.chunk_type = chunk_type
self.objs = objs
- self._data_size = data_size
+ self.data_size = data_size
self._field_type = field_type
-
- @property
- def data_size(self):
- if callable(self._data_size):
- self._data_size = self._data_size(self.dobj, self.objs)
- return self._data_size
+ self._cache = cache
def _accumulate_values(self, method):
# We call this generically. It's somewhat slower, since we're doing
@@ -477,35 +489,25 @@
f = getattr(obj, mname)
arrs.append(f(self.dobj))
arrs = np.concatenate(arrs)
- self._data_size = arrs.shape[0]
+ self.data_size = arrs.shape[0]
return arrs
- _fcoords = None
- @property
+ @cached_property
def fcoords(self):
- if self.data_size is None:
- self._fcoords = self._accumulate_values("fcoords")
- if self._fcoords is not None: return self._fcoords
ci = np.empty((self.data_size, 3), dtype='float64')
- self._fcoords = ci
- if self.data_size == 0: return self._fcoords
+ if self.data_size == 0: return ci
ind = 0
for obj in self.objs:
c = obj.select_fcoords(self.dobj)
if c.shape[0] == 0: continue
ci[ind:ind+c.shape[0], :] = c
ind += c.shape[0]
- return self._fcoords
+ return ci
- _icoords = None
- @property
+ @cached_property
def icoords(self):
- if self.data_size is None:
- self._icoords = self._accumulate_values("icoords")
- if self._icoords is not None: return self._icoords
ci = np.empty((self.data_size, 3), dtype='int64')
- self._icoords = ci
- if self.data_size == 0: return self._icoords
+ if self.data_size == 0: return ci
ind = 0
for obj in self.objs:
c = obj.select_icoords(self.dobj)
@@ -514,15 +516,10 @@
ind += c.shape[0]
return ci
- _fwidth = None
- @property
+ @cached_property
def fwidth(self):
- if self.data_size is None:
- self._fwidth = self._accumulate_values("fwidth")
- if self._fwidth is not None: return self._fwidth
ci = np.empty((self.data_size, 3), dtype='float64')
- self._fwidth = ci
- if self.data_size == 0: return self._fwidth
+ if self.data_size == 0: return ci
ind = 0
for obj in self.objs:
c = obj.select_fwidth(self.dobj)
@@ -531,15 +528,10 @@
ind += c.shape[0]
return ci
- _ires = None
- @property
+ @cached_property
def ires(self):
- if self.data_size is None:
- self._ires = self._accumulate_values("ires")
- if self._ires is not None: return self._ires
ci = np.empty(self.data_size, dtype='int64')
- self._ires = ci
- if self.data_size == 0: return self._ires
+ if self.data_size == 0: return ci
ind = 0
for obj in self.objs:
c = obj.select_ires(self.dobj)
@@ -548,22 +540,17 @@
ind += c.size
return ci
- _tcoords = None
- @property
+ @cached_property
def tcoords(self):
- if self._tcoords is None:
- self.dtcoords
+ self.dtcoords
return self._tcoords
- _dtcoords = None
- @property
+ @cached_property
def dtcoords(self):
- if self._dtcoords is not None: return self._dtcoords
ct = np.empty(self.data_size, dtype='float64')
cdt = np.empty(self.data_size, dtype='float64')
- self._tcoords = ct
- self._dtcoords = cdt
- if self.data_size == 0: return self._dtcoords
+ self._tcoords = ct # Set this for tcoords
+ if self.data_size == 0: return cdt
ind = 0
for obj in self.objs:
gdt, gt = obj.tcoords(self.dobj)
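The new decorator replaces the per-attribute boilerplate with one memoization path that honors the chunk's _cache flag. A standalone miniature of the pattern (the data_size/_accumulate_values branch is omitted for brevity):

    def cached_property(func):
        name = '_%s' % func.__name__
        def getter(self):
            if self._cache and getattr(self, name, None) is not None:
                return getattr(self, name)
            value = func(self)
            if self._cache:
                setattr(self, name, value)
            return value
        return property(getter)

    class Chunk(object):
        def __init__(self, cache=False):
            self._cache = cache

        @cached_property
        def fcoords(self):
            return [0.0, 1.0, 2.0]  # stands in for the real gather loop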
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/geometry/grid_geometry_handler.py
--- a/yt/geometry/grid_geometry_handler.py
+++ b/yt/geometry/grid_geometry_handler.py
@@ -245,16 +245,16 @@
dobj.size = self._count_selection(dobj)
if getattr(dobj, "shape", None) is None:
dobj.shape = (dobj.size,)
- dobj._current_chunk = list(self._chunk_all(dobj))[0]
+ dobj._current_chunk = list(self._chunk_all(dobj, cache = False))[0]
def _count_selection(self, dobj, grids = None):
if grids is None: grids = dobj._chunk_info
count = sum((g.count(dobj.selector) for g in grids))
return count
- def _chunk_all(self, dobj):
+ def _chunk_all(self, dobj, cache = True):
gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- yield YTDataChunk(dobj, "all", gobjs, dobj.size)
+ yield YTDataChunk(dobj, "all", gobjs, dobj.size, cache)
def _chunk_spatial(self, dobj, ngz, sort = None):
gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
@@ -271,13 +271,16 @@
g = og
size = self._count_selection(dobj, [og])
if size == 0: continue
- yield YTDataChunk(dobj, "spatial", [g], size)
+ # We don't want to cache any of the masks or icoords or fcoords for
+ # individual grids.
+ yield YTDataChunk(dobj, "spatial", [g], size, cache = False)
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
gfiles = defaultdict(list)
gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for g in gobjs:
gfiles[g.filename].append(g)
for fn in sorted(gfiles):
gs = gfiles[fn]
- yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs))
+ yield YTDataChunk(dobj, "io", gs, self._count_selection(dobj, gs),
+ cache = cache)
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -170,10 +170,10 @@
g = og
yield YTDataChunk(dobj, "spatial", [g])
- def _chunk_io(self, dobj):
+ def _chunk_io(self, dobj, cache = True):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
- yield YTDataChunk(dobj, "io", [subset], None)
+ yield YTDataChunk(dobj, "io", [subset], None, cache = cache)
class ParticleDataChunk(YTDataChunk):
def __init__(self, oct_handler, regions, *args, **kwargs):
diff -r 9ee107d400f8734cdd60469432f55f1a737991d3 -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 yt/utilities/lib/QuadTree.pyx
--- a/yt/utilities/lib/QuadTree.pyx
+++ b/yt/utilities/lib/QuadTree.pyx
@@ -131,7 +131,7 @@
cdef int i, j
cdef QuadTreeNode *node
cdef np.int64_t pos[2]
- cdef np.float64_t *vals = <np.float64_t *> alloca(
+ cdef np.float64_t *vals = <np.float64_t *> malloc(
sizeof(np.float64_t)*nvals)
cdef np.float64_t weight_val = 0.0
self.nvals = nvals
@@ -160,6 +160,7 @@
self.root_nodes[i][j] = QTN_initialize(
pos, nvals, vals, weight_val)
self.num_cells = self.top_grid_dims[0] * self.top_grid_dims[1]
+ free(vals)
cdef int count_total_cells(self, QuadTreeNode *root):
cdef int total = 0
@@ -373,7 +374,7 @@
cdef np.float64_t *vdata = <np.float64_t *> nvals.data
cdef np.float64_t *wdata = <np.float64_t *> nwvals.data
cdef np.float64_t wtoadd
- cdef np.float64_t *vtoadd = <np.float64_t *> alloca(
+ cdef np.float64_t *vtoadd = <np.float64_t *> malloc(
sizeof(np.float64_t)*self.nvals)
for i in range(self.top_grid_dims[0]):
for j in range(self.top_grid_dims[1]):
@@ -381,6 +382,7 @@
wtoadd = 0.0
curpos += self.fill(self.root_nodes[i][j],
curpos, px, py, pdx, pdy, vdata, wdata, vtoadd, wtoadd, 0)
+ free(vtoadd)
return opx, opy, opdx, opdy, nvals, nwvals
cdef int count(self, QuadTreeNode *node):
@@ -406,7 +408,7 @@
np.int64_t level):
cdef int i, j, n
cdef np.float64_t *vorig
- vorig = <np.float64_t *> alloca(sizeof(np.float64_t) * self.nvals)
+ vorig = <np.float64_t *> malloc(sizeof(np.float64_t) * self.nvals)
if node.children[0][0] == NULL:
if self.merged == -1:
for i in range(self.nvals):
@@ -444,6 +446,7 @@
for i in range(self.nvals):
vtoadd[i] = vorig[i]
wtoadd -= node.weight_val
+ free(vorig)
return added
@cython.boundscheck(False)
https://bitbucket.org/yt_analysis/yt-3.0/commits/d36695e30002/
Changeset: d36695e30002
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-17 01:25:43
Summary: Merge
Affected #: 57 files
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d setup.py
--- a/setup.py
+++ b/setup.py
@@ -6,8 +6,11 @@
import subprocess
import shutil
import glob
-import distribute_setup
-distribute_setup.use_setuptools()
+import setuptools
+from distutils.version import StrictVersion
+if StrictVersion(setuptools.__version__) < StrictVersion('0.7.0'):
+ import distribute_setup
+ distribute_setup.use_setuptools()
from distutils.command.build_py import build_py
from numpy.distutils.misc_util import appendpath
@@ -153,8 +156,6 @@
# End snippet
######
-import setuptools
-
VERSION = "3.0dev"
if os.path.exists('MANIFEST'):
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -83,3 +83,26 @@
"""
__version__ = "3.0-dev"
+
+def run_nose(verbose=False, run_answer_tests=False, answer_big_data=False):
+ import nose, os, sys
+ from yt.config import ytcfg
+ nose_argv = sys.argv
+ nose_argv += ['--exclude=answer_testing','--detailed-errors']
+ if verbose:
+ nose_argv.append('-v')
+ if run_answer_tests:
+ nose_argv.append('--with-answer-testing')
+ if answer_big_data:
+ nose_argv.append('--answer-big-data')
+ log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
+ ytcfg["yt","suppressStreamLogging"] = 'True'
+ initial_dir = os.getcwd()
+ yt_file = os.path.abspath(__file__)
+ yt_dir = os.path.dirname(yt_file)
+ os.chdir(yt_dir)
+ try:
+ nose.run(argv=nose_argv)
+ finally:
+ os.chdir(initial_dir)
+ ytcfg["yt","suppressStreamLogging"] = log_suppress
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
--- a/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
+++ b/yt/analysis_modules/radmc3d_export/RadMC3DInterface.py
@@ -158,7 +158,8 @@
self.layers.append(base_layer)
self.cell_count += np.product(pf.domain_dimensions)
- for grid in pf.h.grids:
+ sorted_grids = sorted(pf.h.grids, key=lambda x: x.Level)
+ for grid in sorted_grids:
if grid.Level <= self.max_level:
self._add_grid_to_layers(grid)
@@ -232,11 +233,11 @@
if p == 0:
ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
else:
- LE = np.zeros(3)
+ parent_LE = np.zeros(3)
for potential_parent in self.layers:
if potential_parent.id == p:
- LE = potential_parent.LeftEdge
- ind = (layer.LeftEdge - LE) / (2.0*dds) + 1
+ parent_LE = potential_parent.LeftEdge
+ ind = (layer.LeftEdge - parent_LE) / (2.0*dds) + 1
ix = int(ind[0]+0.5)
iy = int(ind[1]+0.5)
iz = int(ind[2]+0.5)
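Sorting by Level presumably guarantees that a parent layer is added before any of its children, so the parent lookup above can succeed. The ordering trick in isolation (hypothetical stand-in grids):

    class FakeGrid(object):
        def __init__(self, level):
            self.Level = level

    grids = [FakeGrid(2), FakeGrid(0), FakeGrid(1)]
    sorted_grids = sorted(grids, key=lambda g: g.Level)  # levels 0, 1, 2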
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -409,7 +409,8 @@
self.left_edge = np.array(left_edge)
self.level = level
rdx = self.pf.domain_dimensions*self.pf.refine_by**level
- self.dds = self.pf.domain_width/rdx.astype("float64")
+ rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1 # issue 602
+ self.dds = self.pf.domain_width / rdx.astype("float64")
self.ActiveDimensions = np.array(dims, dtype='int32')
self.right_edge = self.left_edge + self.ActiveDimensions*self.dds
self._num_ghost_zones = num_ghost_zones
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -219,11 +219,14 @@
finfo = self.pf._get_field_info(*field)
with self._field_type_state(ftype, finfo):
if fname in self._container_fields:
- return self._generate_container_field(field)
+ tr = self._generate_container_field(field)
if finfo.particle_type:
- return self._generate_particle_field(field)
+ tr = self._generate_particle_field(field)
else:
- return self._generate_fluid_field(field)
+ tr = self._generate_fluid_field(field)
+ if tr is None:
+ raise YTCouldNotGenerateField(field, self.pf)
+ return tr
def _generate_fluid_field(self, field):
# First we check the validator
@@ -467,7 +470,21 @@
if self._current_chunk is None:
self.hierarchy._identify_base_chunk(self)
if fields is None: return
- fields = self._determine_fields(fields)
+ nfields = []
+ apply_fields = defaultdict(list)
+ for field in self._determine_fields(fields):
+ if field[0] in self.pf.h.filtered_particle_types:
+ f = self.pf.known_filters[field[0]]
+ apply_fields[field[0]].append(
+ (f.filtered_type, field[1]))
+ else:
+ nfields.append(field)
+ for filter_type in apply_fields:
+ f = self.pf.known_filters[filter_type]
+ with f.apply(self):
+ self.get_data(apply_fields[filter_type])
+ fields = nfields
+ if len(fields) == 0: return
# Now we collect all our fields
# Here is where we need to perform a validation step, so that if we
# have a field requested that we actually *can't* yet get, we put it
@@ -603,14 +620,15 @@
def _get_pw(self, fields, center, width, origin, axes_unit, plot_type):
axis = self.axis
self.fields = [k for k in self.field_data.keys()
- if k not in self._container_fields]
+ if k not in self._key_fields]
from yt.visualization.plot_window import \
GetWindowParameters, PWViewerMPL
from yt.visualization.fixed_resolution import FixedResolutionBuffer
(bounds, center, units) = GetWindowParameters(axis, center, width, self.pf)
if axes_unit is None and units != ('1', '1'):
axes_unit = units
- pw = PWViewerMPL(self, bounds, origin=origin, frb_generator=FixedResolutionBuffer,
+ pw = PWViewerMPL(self, bounds, fields=list(self.fields), origin=origin,
+ frb_generator=FixedResolutionBuffer,
plot_type=plot_type)
pw.set_axes_unit(axes_unit)
return pw
@@ -1165,6 +1183,7 @@
class YTValueCutExtractionBase(YTSelectionContainer3D):
_type_name = "cut_region"
+ _con_args = ("_base_region", "_field_cuts")
"""
In-line extracted regions accept a base region and a set of field_cuts to
determine which points in a grid should be included.
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -40,7 +40,7 @@
gravitational_constant_cgs, \
mass_sun_cgs, \
HUGE
-
+from yt.utilities.math_utils import prec_accum
__CUDA_BLOCK_SIZE = 256
@@ -119,12 +119,15 @@
This function takes no arguments and returns the sum of cell masses and
particle masses in the object.
"""
- baryon_mass = data["CellMassMsun"].sum()
try:
- particle_mass = data["ParticleMassMsun"].sum()
- total_mass = baryon_mass + particle_mass
+ cell_mass = _TotalQuantity(data,["CellMassMsun"])
except KeyError:
- total_mass = baryon_mass
+ cell_mass = 0.0
+ try:
+ particle_mass = _TotalQuantity(data,["ParticleMassMsun"])
+ except KeyError:
+ particle_mass = 0.0
+ total_mass = cell_mass + particle_mass
return [total_mass]
def _combTotalMass(data, total_mass):
return total_mass.sum()
@@ -146,15 +149,15 @@
"""
x = y = z = den = 0
if use_cells:
- x += (data["x"] * data["CellMassMsun"]).sum()
- y += (data["y"] * data["CellMassMsun"]).sum()
- z += (data["z"] * data["CellMassMsun"]).sum()
- den += data["CellMassMsun"].sum()
+ x += (data["x"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ y += (data["y"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ z += (data["z"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ den += data["CellMassMsun"].sum(dtype=np.float64)
if use_particles:
- x += (data["particle_position_x"] * data["ParticleMassMsun"]).sum()
- y += (data["particle_position_y"] * data["ParticleMassMsun"]).sum()
- z += (data["particle_position_z"] * data["ParticleMassMsun"]).sum()
- den += data["ParticleMassMsun"].sum()
+ x += (data["particle_position_x"] * data["ParticleMassMsun"]).sum(dtype=np.float64)
+ y += (data["particle_position_y"] * data["ParticleMassMsun"]).sum(dtype=np.float64)
+ z += (data["particle_position_z"] * data["ParticleMassMsun"]).sum(dtype=np.float64)
+ den += data["ParticleMassMsun"].sum(dtype=np.float64)
return x,y,z, den
def _combCenterOfMass(data, x,y,z, den):
@@ -169,8 +172,8 @@
:param field: The field to average
:param weight: The field to weight by
"""
- num = (data[field] * data[weight]).sum()
- den = data[weight].sum()
+ num = (data[field] * data[weight]).sum(dtype=np.float64)
+ den = data[weight].sum(dtype=np.float64)
return num, den
def _combWeightedAverageQuantity(data, field, weight):
return field.sum()/weight.sum()
@@ -186,11 +189,11 @@
Returns the weighted variance and the weighted mean.
"""
- my_weight = data[weight].sum()
+ my_weight = data[weight].sum(dtype=np.float64)
if my_weight == 0:
return 0.0, 0.0, 0.0
- my_mean = (data[field] * data[weight]).sum() / my_weight
- my_var2 = (data[weight] * (data[field] - my_mean)**2).sum() / my_weight
+ my_mean = (data[field] * data[weight]).sum(dtype=np.float64) / my_weight
+ my_var2 = (data[weight] * (data[field] - my_mean)**2).sum(dtype=np.float64) / my_weight
return my_weight, my_mean, my_var2
def _combWeightedVariance(data, my_weight, my_mean, my_var2):
all_weight = my_weight.sum()
@@ -204,10 +207,10 @@
"""
This function returns the mass-weighted average velocity in the object.
"""
- xv = (data["x-velocity"] * data["CellMassMsun"]).sum()
- yv = (data["y-velocity"] * data["CellMassMsun"]).sum()
- zv = (data["z-velocity"] * data["CellMassMsun"]).sum()
- w = data["CellMassMsun"].sum()
+ xv = (data["x-velocity"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ yv = (data["y-velocity"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ zv = (data["z-velocity"] * data["CellMassMsun"]).sum(dtype=np.float64)
+ w = data["CellMassMsun"].sum(dtype=np.float64)
return xv, yv, zv, w
def _combBulkVelocity(data, xv, yv, zv, w):
w = w.sum()
@@ -225,7 +228,7 @@
amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
- j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
def _StarAngularMomentumVector(data):
@@ -241,7 +244,7 @@
amx = sLx * star_mass
amy = sLy * star_mass
amz = sLz * star_mass
- j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
def _ParticleAngularMomentumVector(data):
@@ -261,8 +264,8 @@
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
- L_vec = j_mag.sum(axis=0)
- L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum())
+ L_vec = j_mag.sum(axis=0,dtype=np.float64)
+ L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum(dtype=np.float64))
return L_vec_norm
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
@@ -279,13 +282,13 @@
This function returns the spin parameter for the baryons, but it uses
the particles in calculating enclosed mass.
"""
- m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
+ m_enc = _TotalMass(data)
amx = data["SpecificAngularMomentumX"]*data["CellMassMsun"]
amy = data["SpecificAngularMomentumY"]*data["CellMassMsun"]
amz = data["SpecificAngularMomentumZ"]*data["CellMassMsun"]
- j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
- e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0)
- weight=data["CellMassMsun"].sum()
+ j_mag = np.array([amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)])
+ e_term_pre = np.sum(data["CellMassMsun"]*data["VelocityMagnitude"]**2.0,dtype=np.float64)
+ weight=data["CellMassMsun"].sum(dtype=np.float64)
return j_mag, m_enc, e_term_pre, weight
def _combBaryonSpinParameter(data, j_mag, m_enc, e_term_pre, weight):
# Because it's a vector field, we have to ensure we have enough dimensions
@@ -304,15 +307,15 @@
This function returns the spin parameter for the baryons, but it uses
the particles in calculating enclosed mass.
"""
- m_enc = data["CellMassMsun"].sum() + data["ParticleMassMsun"].sum()
+ m_enc = _TotalMass(data)
amx = data["ParticleSpecificAngularMomentumX"]*data["ParticleMassMsun"]
- if amx.size == 0: return (np.zeros((3,), dtype='float64'), m_enc, 0, 0)
+ if amx.size == 0: return (np.zeros((3,), dtype=np.float64), m_enc, 0, 0)
amy = data["ParticleSpecificAngularMomentumY"]*data["ParticleMassMsun"]
amz = data["ParticleSpecificAngularMomentumZ"]*data["ParticleMassMsun"]
- j_mag = np.array([amx.sum(), amy.sum(), amz.sum()])
+ j_mag = np.array([amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)])
e_term_pre = np.sum(data["ParticleMassMsun"]
- *data["ParticleVelocityMagnitude"]**2.0)
- weight=data["ParticleMassMsun"].sum()
+ *data["ParticleVelocityMagnitude"]**2.0,dtype=np.float64)
+ weight=data["ParticleMassMsun"].sum(dtype=np.float64)
return j_mag, m_enc, e_term_pre, weight
add_quantity("ParticleSpinParameter", function=_ParticleSpinParameter,
combine_function=_combBaryonSpinParameter, n_ret=4)
@@ -359,19 +362,19 @@
kinetic = 0.5 * (data["CellMass"] *
((data["x-velocity"] - bv_x)**2 +
(data["y-velocity"] - bv_y)**2 +
- (data["z-velocity"] - bv_z)**2)).sum()
+ (data["z-velocity"] - bv_z)**2)).sum(dtype=np.float64)
if (include_particles):
- mass_to_use = data["TotalMass"]
+ mass_to_use = data["TotalMass"]
kinetic += 0.5 * (data["Dark_Matter_Mass"] *
((data["cic_particle_velocity_x"] - bv_x)**2 +
(data["cic_particle_velocity_y"] - bv_y)**2 +
- (data["cic_particle_velocity_z"] - bv_z)**2)).sum()
+ (data["cic_particle_velocity_z"] - bv_z)**2)).sum(dtype=np.float64)
else:
- mass_to_use = data["CellMass"]
+ mass_to_use = data["CellMass"]
# Add thermal energy to kinetic energy
if (include_thermal_energy):
- thermal = (data["ThermalEnergy"] * mass_to_use).sum()
+ thermal = (data["ThermalEnergy"] * mass_to_use).sum(dtype=np.float64)
kinetic += thermal
if periodic_test:
kinetic = np.ones_like(kinetic)
@@ -698,9 +701,9 @@
totals = []
for field in fields:
if data[field].size < 1:
- totals.append(0.0)
+ totals.append(np.zeros(1,dtype=prec_accum[data[field].dtype])[0])
continue
- totals.append(data[field].sum())
+ totals.append(data[field].sum(dtype=prec_accum[data[field].dtype]))
return len(fields), totals
def _combTotalQuantity(data, n_fields, totals):
totals = np.atleast_2d(totals)
@@ -717,7 +720,7 @@
pos = [data[(particle_type,"particle_position_%s"%ax)] for ax in "xyz"]
pos = np.array(pos).T
mas = data[(particle_type,"particle_mass")]
- calc_radius= lambda x,y:np.sqrt(np.sum((x-y)**2.0,axis=1))
+ calc_radius= lambda x,y:np.sqrt(np.sum((x-y)**2.0,axis=1,dtype=np.float64))
density = 0
if pos.shape[0]==0:
return -1.0,[-1.,-1.,-1.]
@@ -734,7 +737,7 @@
center = 0.5*(le+re)
idx = calc_radius(pos,center)<bin_size
pos, mas = pos[idx],mas[idx]
- density = max(density,mas.sum()/bin_size**3.0)
+ density = max(density,mas.sum(dtype=np.float64)/bin_size**3.0)
return density, center
def _combParticleDensityCenter(data,densities,centers):
i = np.argmax(densities)
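These dtype=np.float64 arguments force NumPy to accumulate in double precision even when the field data is float32, which matters once the running sum dwarfs the individual terms. A small illustration:

    import numpy as np

    x = np.full(10**7, np.float32(0.1), dtype=np.float32)
    s32 = x.sum()                  # accumulated at float32 precision
    s64 = x.sum(dtype=np.float64)  # double-precision accumulator
    # s64 tracks the exact sum of the stored values more closely than s32.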
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -67,6 +67,7 @@
self._child_mask = self._child_indices = self._child_index_mask = None
self.start_index = None
self._last_mask = None
+ self._last_count = -1
self._last_selector_id = None
self._current_particle_type = 'all'
self._current_fluid_type = self.pf.default_fluid_type
@@ -447,14 +448,14 @@
def select_icoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None: return np.empty((0,3), dtype='int64')
- coords = convert_mask_to_indices(mask, mask.sum())
+ coords = convert_mask_to_indices(mask, self._last_count)
coords += self.get_global_startindex()[None, :]
return coords
def select_fcoords(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None: return np.empty((0,3), dtype='float64')
- coords = convert_mask_to_indices(mask, mask.sum()).astype("float64")
+ coords = convert_mask_to_indices(mask, self._last_count).astype("float64")
coords += 0.5
coords *= self.dds[None, :]
coords += self.LeftEdge[None, :]
@@ -471,7 +472,7 @@
def select_ires(self, dobj):
mask = self._get_selector_mask(dobj.selector)
if mask is None: return np.empty(0, dtype='int64')
- coords = np.empty(mask.sum(), dtype='int64')
+ coords = np.empty(self._last_count, dtype='int64')
coords[:] = self.Level
return coords
@@ -496,6 +497,10 @@
else:
self._last_mask = mask = selector.fill_mask(self)
self._last_selector_id = id(selector)
+ if mask is None:
+ self._last_count = 0
+ else:
+ self._last_count = mask.sum()
return mask
def select(self, selector, source, dest, offset):
@@ -508,7 +513,7 @@
def count(self, selector):
mask = self._get_selector_mask(selector)
if mask is None: return 0
- return mask.sum()
+ return self._last_count
def count_particles(self, selector, x, y, z):
# We don't cache the selector results
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -42,6 +42,7 @@
mh
def particle_deposition_functions(ptype, coord_name, mass_name, registry):
+ orig = set(registry.keys())
def particle_count(field, data):
pos = data[ptype, coord_name]
d = data.deposit(pos, method = "count")
@@ -112,6 +113,9 @@
particle_type = True,
units = r"\mathrm{M}_\odot")
+ return list(set(registry.keys()).difference(orig))
+
+
def particle_scalar_functions(ptype, coord_name, vel_name, registry):
# Now we have to set up the various velocity and coordinate things. In the
@@ -119,6 +123,8 @@
# elsewhere, and stop using these.
# Note that we pass in _ptype here so that it's defined inside the closure.
+ orig = set(registry.keys())
+
def _get_coord_funcs(axi, _ptype):
def _particle_velocity(field, data):
return data[_ptype, vel_name][:,axi]
@@ -132,9 +138,12 @@
registry.add_field((ptype, "particle_position_%s" % ax),
particle_type = True, function = p)
+ return list(set(registry.keys()).difference(orig))
+
def particle_vector_functions(ptype, coord_names, vel_names, registry):
# This will column_stack a set of scalars to create vector fields.
+ orig = set(registry.keys())
def _get_vec_func(_ptype, names):
def particle_vectors(field, data):
@@ -147,3 +156,4 @@
function=_get_vec_func(ptype, vel_names),
particle_type=True)
+ return list(set(registry.keys()).difference(orig))
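Each setup function now records the registry keys present before it runs and returns the difference afterwards, i.e. the list of fields it added. The bookkeeping pattern in miniature, with a plain dict standing in for the field registry:

    def setup_fields(registry):
        orig = set(registry.keys())
        registry["particle_count"] = lambda field, data: None  # stand-in registration
        registry["particle_mass_deposit"] = lambda field, data: None
        return list(set(registry.keys()).difference(orig))

    new_fields = setup_fields({})  # -> the two names just registered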
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/particle_filters.py
--- /dev/null
+++ b/yt/data_objects/particle_filters.py
@@ -0,0 +1,95 @@
+"""
+This is a library for defining and using particle filters.
+
+Author: Matthew Turk <matthewturk at gmail.com>
+Affiliation: Columbia University
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2013 Matthew Turk. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+import numpy as np
+import copy
+from contextlib import contextmanager
+
+from yt.data_objects.field_info_container import \
+ NullFunc, TranslationFunc
+from yt.utilities.exceptions import YTIllDefinedFilter
+from yt.funcs import *
+
+# One to many mapping
+filter_registry = defaultdict(list)
+
+class DummyFieldInfo(object):
+ particle_type = True
+dfi = DummyFieldInfo()
+
+class ParticleFilter(object):
+ def __init__(self, name, function, requires, filtered_type):
+ self.name = name
+ self.function = function
+ self.requires = requires[:]
+ self.filtered_type = filtered_type
+
+ @contextmanager
+ def apply(self, dobj):
+ with dobj._chunked_read(dobj._current_chunk):
+ with dobj._field_type_state(self.filtered_type, dfi):
+ # We won't be storing the field data from the whole read, so we
+ # start by filtering now.
+ filter = self.function(self, dobj)
+ yield
+ # Retain a reference here, and we'll filter all appropriate fields
+ # later.
+ fd = dobj.field_data
+ for f, tr in fd.items():
+ if f[0] != self.filtered_type: continue
+ if tr.shape != filter.shape and tr.shape[0] != filter.shape[0]:
+ raise YTIllDefinedFilter(self, tr.shape, filter.shape)
+ elif filter.size == 0:
+ # Filtering empty set. This keeps our dimensions correct.
+ # Otherwise we end up with out-of-axis and shape problems.
+ d = tr.copy()
+ elif len(tr.shape) > len(filter.shape):
+ # Filter must always be 1D
+ d = tr[filter,:]
+ else:
+ d = tr[filter]
+ dobj.field_data[self.name, f[1]] = d
+
+ def available(self, field_list):
+ # Note that this assumes that all the fields in field_list have the
+ # same form as the 'requires' attributes. This won't be true if the
+ # fields are implicitly "all" or something.
+ return all((self.filtered_type, field) in field_list for field in self.requires)
+
+ def wrap_func(self, field_name, old_fi):
+ new_fi = copy.copy(old_fi)
+ new_fi.name = (self.filtered_type, field_name[1])
+ return new_fi
+
+def add_particle_filter(name, function, requires = None, filtered_type = "all"):
+ if requires is None: requires = []
+ filter = ParticleFilter(name, function, requires, filtered_type)
+ filter_registry[name].append(filter)
+
+def particle_filter(name, requires = None, filtered_type = "all"):
+ def _pfilter(func):
+ add_particle_filter(name, func, requires, filtered_type)
+ return func
+ return _pfilter
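A registration sketch for the new decorator (hypothetical filter; assumes the dataset carries an ("all", "creation_time") field):

    from yt.data_objects.particle_filters import particle_filter

    @particle_filter("formed_stars", requires=["creation_time"], filtered_type="all")
    def formed_stars(pfilter, data):
        # Filter functions receive the ParticleFilter and a data object, and
        # return a boolean mask over the filtered_type particles.
        return data[pfilter.filtered_type, "creation_time"] > 0.0

    # After pf.add_particle_filter("formed_stars"), fields such as
    # ("formed_stars", "particle_mass") become available on data objects.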
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -490,14 +490,21 @@
"""
normal = self.normal
center = self.center
+ self.fields = [k for k in self.field_data.keys()
+ if k not in self._key_fields]
from yt.visualization.plot_window import \
GetObliqueWindowParameters, PWViewerMPL
- from yt.visualization.fixed_resolution import ObliqueFixedResolutionBuffer
- (bounds, center_rot, units) = GetObliqueWindowParameters(normal, center, width, self.pf)
+ from yt.visualization.fixed_resolution import \
+ ObliqueFixedResolutionBuffer
+ (bounds, center_rot, units) = \
+ GetObliqueWindowParameters(normal, center, width, self.pf)
if axes_unit is None and units != ('1', '1'):
axes_units = units
- pw = PWViewerMPL(self, bounds, origin='center-window', periodic=False, oblique=True,
- frb_generator=ObliqueFixedResolutionBuffer, plot_type='OffAxisSlice')
+ pw = PWViewerMPL(
+ self, bounds, fields=self.fields, origin='center-window',
+ periodic=False, oblique=True,
+ frb_generator=ObliqueFixedResolutionBuffer,
+ plot_type='OffAxisSlice')
pw.set_axes_unit(axes_unit)
return pw
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -9,5 +9,6 @@
from numpy.distutils.misc_util import Configuration
config = Configuration('data_objects', parent_package, top_path)
config.make_config_py() # installs __config__.py
+ config.add_subpackage("tests")
#config.make_svn_version_py()
return config
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -37,6 +37,8 @@
output_type_registry
from yt.data_objects.field_info_container import \
FieldInfoContainer, NullFunc
+from yt.data_objects.particle_filters import \
+ filter_registry
from yt.utilities.minimal_representation import \
MinimalStaticOutput
@@ -61,6 +63,8 @@
coordinates = None
max_level = 99
storage_filename = None
+ _particle_mass_name = None
+ _particle_coordinates_name = None
class __metaclass__(type):
def __init__(cls, name, b, d):
@@ -71,7 +75,12 @@
def __new__(cls, filename=None, *args, **kwargs):
if not isinstance(filename, types.StringTypes):
obj = object.__new__(cls)
- obj.__init__(filename, *args, **kwargs)
+ # The Stream frontend uses a StreamHandler object to pass metadata
+ # to __init__.
+ is_stream = (hasattr(filename, 'get_fields') and
+ hasattr(filename, 'get_particle_type'))
+ if not is_stream:
+ obj.__init__(filename, *args, **kwargs)
return obj
apath = os.path.abspath(filename)
if not os.path.exists(apath): raise IOError(filename)
@@ -89,6 +98,7 @@
self.file_style = file_style
self.conversion_factors = {}
self.parameters = {}
+ self.known_filters = {}
# path stuff
self.parameter_filename = str(filename)
@@ -250,6 +260,21 @@
else:
raise YTGeometryNotSupported(self.geometry)
+ def add_particle_filter(self, filter):
+ if isinstance(filter, types.StringTypes):
+ used = False
+ for f in filter_registry[filter]:
+ used = self.h._setup_filtered_type(f)
+ if used:
+ filter = f
+ break
+ else:
+ used = self.h._setup_filtered_type(filter)
+ if not used:
+ return False
+ self.known_filters[filter.name] = filter
+ return True
+
_last_freq = (None, None)
_last_finfo = None
def _get_field_info(self, ftype, fname):
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -1139,7 +1139,7 @@
return get_sph_r_component(Bfields, theta, phi, normal)
-add_field("BRadial", function=_BPoloidal,
+add_field("BRadial", function=_BRadial,
units=r"\rm{Gauss}",
validators=[ValidateParameter("normal")])
@@ -1472,7 +1472,7 @@
domegax_dt = data["VorticityX"] / data["VorticityGrowthX"]
domegay_dt = data["VorticityY"] / data["VorticityGrowthY"]
domegaz_dt = data["VorticityZ"] / data["VorticityGrowthZ"]
- return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt)
+ return np.sqrt(domegax_dt**2 + domegay_dt**2 + domegaz_dt**2)
add_field("VorticityGrowthTimescale", function=_VorticityGrowthTimescale,
validators=[ValidateSpatial(1,
["x-velocity", "y-velocity", "z-velocity"])],
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -341,7 +341,7 @@
if selected_mass[ispec] :
count = len(data[selected_mass[ispec]])
data[selected_mass[ispec]].resize(count+1)
- data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"][0]
+ data[selected_mass[ispec]][count] = self.parameters["particle_species_mass"][ispec]
status = artio_particle_read_species_end( self.handle )
check_artio_status(status)
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -288,6 +288,15 @@
for unit in mpc_conversion.keys():
self.units[unit] = self.parameters['unit_l']\
* mpc_conversion[unit] / mpc_conversion["cm"]
+ if self.cosmological_simulation:
+ for unit in mpc_conversion:
+ self.units["%sh" % unit] = self.units[unit] * \
+ self.hubble_constant
+ self.units["%shcm" % unit] = \
+ (self.units["%sh" % unit] /
+ (1 + self.current_redshift))
+ self.units["%scm" % unit] = \
+ self.units[unit] / (1 + self.current_redshift)
for unit in sec_conversion.keys():
self.time_units[unit] = self.parameters['unit_t']\
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/artio/fields.py
--- a/yt/frontends/artio/fields.py
+++ b/yt/frontends/artio/fields.py
@@ -34,6 +34,9 @@
ValidateSpatial, \
ValidateGridType
import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+ particle_deposition_functions, \
+ particle_vector_functions
import numpy as np
KnownARTIOFields = FieldInfoContainer()
@@ -247,51 +250,20 @@
pf = "particle_velocity_%s" % ax
add_artio_field(pf, function=NullFunc,
particle_type=True)
-add_artio_field("particle_mass", function=NullFunc, particle_type=True)
-add_artio_field("particle_index", function=NullFunc, particle_type=True)
for ax in 'xyz':
pf = "particle_position_%s" % ax
add_artio_field(pf, function=NullFunc,
particle_type=True)
-
-def ParticleMass(field, data):
- return data['particle_mass']
-
-
-def _convertParticleMass(field, data):
- return data.convert('particle_mass')
-add_field("ParticleMass",
- function=ParticleMass,
+def _convertParticleMass(data):
+ return np.float64(data.convert('particle_mass'))
+add_field("particle_mass",
+ function=NullFunc,
convert_function=_convertParticleMass,
units=r"\rm{g}",
particle_type=True)
-
-
-def ParticleMassMsunAll(field, data):
- return data['all', 'particle_mass'] * \
- data.pf.conversion_factors['particle_mass_msun']
-add_field(('all', "ParticleMassMsun"),
- function=ParticleMassMsunAll,
- units=r"\rm{M\odot}", particle_type=True)
-
-
-def ParticleMassMsunStars(field, data):
- return data['stars', 'particle_mass'] * \
- data.pf.conversion_factors['particle_mass_msun']
-add_field(('stars', "ParticleMassMsun"),
- function=ParticleMassMsunStars,
- units=r"\rm{M\odot}", particle_type=True)
-
-
-def ParticleMassMsunNbody(field, data):
- return data['nbody', 'particle_mass'] * \
- data.pf.conversion_factors['particle_mass_msun']
-add_field(('nbody', "ParticleMassMsun"),
- function=ParticleMassMsunNbody,
- units=r"\rm{M\odot}", particle_type=True)
-
+add_artio_field("particle_index", function=NullFunc, particle_type=True)
#add_artio_field("creation_time", function=NullFunc, particle_type=True)
def _particle_age(field, data):
@@ -303,6 +275,15 @@
add_field(("stars","particle_age"), function=_particle_age, units=r"\rm{s}",
particle_type=True)
+# We can now set up particle vector and particle deposition fields.
+
+for ptype in ("all", "nbody", "stars"):
+ particle_vector_functions(ptype,
+ ["particle_position_%s" % ax for ax in 'xyz'],
+ ["particle_velocity_%s" % ax for ax in 'xyz'],
+ ARTIOFieldInfo)
+ particle_deposition_functions(ptype, "Coordinates", "particle_mass",
+ ARTIOFieldInfo)
def mass_dm(field, data):
tr = np.ones(data.ActiveDimensions, dtype='float32')
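For each of the three particle types registered in the loop above, particle_vector_functions assembles vector fields from the component position/velocity fields, and particle_deposition_functions adds mesh-deposited versions. A hedged usage sketch; the ("deposit", "<ptype>_density") naming follows the yt-3.0 convention and is an assumption here, as is the dataset name:

    from yt.mods import *                 # yt-2.x/3.0-era import idiom

    pf = load("artio_output")             # hypothetical ARTIO dataset
    dd = pf.h.all_data()
    pos = dd["stars", "Coordinates"]      # vector field built from particle_position_*
    rho = dd["deposit", "stars_density"]  # deposited stellar mass (assumed name)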
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/artio/io.py
--- a/yt/frontends/artio/io.py
+++ b/yt/frontends/artio/io.py
@@ -49,4 +49,7 @@
for onechunk in chunks:
for artchunk in onechunk.objs:
artchunk.fill_particles(tr, fields)
+ for ftype, fname in tr.keys():
+ if fname == "particle_mass":
+ tr[ftype, fname] = tr[ftype, fname].astype("float64")
return tr
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/castro/data_structures.py
--- a/yt/frontends/castro/data_structures.py
+++ b/yt/frontends/castro/data_structures.py
@@ -608,7 +608,7 @@
self.parameters["TopGridRank"] = len(self.parameters["TopGridDimensions"])
self.dimensionality = self.parameters["TopGridRank"]
self.periodicity = ensure_tuple(self.fparameters['castro.lo_bc'] == 0)
- self.domain_dimensions = self.parameters["TopGridDimensions"]
+ self.domain_dimensions = np.array(self.parameters["TopGridDimensions"])
self.refine_by = self.parameters.get("RefineBy", 2)
if (self.parameters.has_key("ComovingCoordinates") and
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -706,6 +706,9 @@
_hierarchy_class = EnzoHierarchy
_fieldinfo_fallback = EnzoFieldInfo
_fieldinfo_known = KnownEnzoFields
+ _particle_mass_name = "ParticleMass"
+ _particle_coordinates_name = "Coordinates"
+
def __init__(self, filename, data_style=None,
file_style = None,
parameter_override = None,
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -36,7 +36,9 @@
import numpy as np
from yt.funcs import *
-_convert_mass = ("particle_mass",)
+_convert_mass = ("particle_mass","mass")
+
+_particle_position_names = {}
class IOHandlerPackedHDF5(BaseIOHandler):
@@ -56,7 +58,8 @@
ptypes = list(set([ftype for ftype, fname in fields]))
fields = list(set(fields))
if len(ptypes) > 1: raise NotImplementedError
- pfields = [(ptypes[0], "particle_position_%s" % ax) for ax in 'xyz']
+ pn = _particle_position_names.get(ptypes[0], r"particle_position_%s")
+ pfields = [(ptypes[0], pn % ax) for ax in 'xyz']
size = 0
for chunk in chunks:
data = self._read_chunk_data(chunk, pfields, 'active',
@@ -83,7 +86,7 @@
for field in set(fields):
ftype, fname = field
gdata = data[g.id].pop(fname)[mask]
- if fname == "particle_mass":
+ if fname in _convert_mass:
gdata *= g.dds.prod()
rv[field][ind:ind+gdata.size] = gdata
ind += gdata.size
@@ -134,7 +137,7 @@
for field in set(fields):
ftype, fname = field
gdata = data[g.id].pop(fname)[mask]
- if fname == "particle_mass":
+ if fname in _convert_mass:
gdata *= g.dds.prod()
rv[field][ind:ind+gdata.size] = gdata
ind += gdata.size
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -212,14 +212,6 @@
def _setup_data_io(self):
self.io = io_registry[self.data_style](self.parameter_file)
- def _chunk_io(self, dobj, cache = True):
- gobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
- # We'll take the max of 128 and the number of processors
- nl = max(16, ytcfg.getint("yt", "__topcomm_parallel_size"))
- for gs in list_chunks(gobjs, nl):
- yield YTDataChunk(dobj, "io", gs, self._count_selection,
- cache = cache)
-
class FLASHStaticOutput(StaticOutput):
_hierarchy_class = FLASHHierarchy
_fieldinfo_fallback = FLASHFieldInfo
@@ -473,7 +465,7 @@
try:
self.parameters["usecosmology"]
self.cosmological_simulation = 1
- self.current_redshift = self.parameters['redshift']
+ self.current_redshift = 1.0/self.parameters['scalefactor'] - 1.0
self.omega_lambda = self.parameters['cosmologicalconstant']
self.omega_matter = self.parameters['omegamatter']
self.hubble_constant = self.parameters['hubbleconstant']
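The redshift fix above is the standard scale-factor relation, a = 1/(1 + z), rearranged to z = 1/a - 1; reading 'scalefactor' rather than a stored 'redshift' parameter keeps the two quantities consistent. For reference:

    # z = 1/a - 1: a = 1 today (z = 0); a = 0.5 at z = 1.
    def redshift_from_scale_factor(a):
        return 1.0 / a - 1.0

    assert redshift_from_scale_factor(1.0) == 0.0
    assert redshift_from_scale_factor(0.5) == 1.0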
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -92,6 +92,6 @@
ind = 0
for chunk in chunks:
for g in chunk.objs:
- data = ds[g.id - g._id_offset,:,:,:].transpose()[mask]
+ data = ds[g.id - g._id_offset,:,:,:].transpose()
ind += g.select(selector, data, rv[field], ind) # caches
return rv
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/gdf/data_structures.py
--- a/yt/frontends/gdf/data_structures.py
+++ b/yt/frontends/gdf/data_structures.py
@@ -76,8 +76,9 @@
LE, RE = self.hierarchy.grid_left_edge[id,:], \
self.hierarchy.grid_right_edge[id,:]
self.dds = np.array((RE-LE)/self.ActiveDimensions)
- if self.pf.dimensionality < 2: self.dds[1] = 1.0
- if self.pf.dimensionality < 3: self.dds[2] = 1.0
+ if self.pf.data_software != "piernik":
+ if self.pf.dimensionality < 2: self.dds[1] = 1.0
+ if self.pf.dimensionality < 3: self.dds[2] = 1.0
self.field_data['dx'], self.field_data['dy'], self.field_data['dz'] = self.dds
@property
@@ -235,6 +236,11 @@
def _parse_parameter_file(self):
self._handle = h5py.File(self.parameter_filename, "r")
+ if 'data_software' in self._handle['gridded_data_format'].attrs:
+ self.data_software = \
+ self._handle['gridded_data_format'].attrs['data_software']
+ else:
+ self.data_software = "unknown"
sp = self._handle["/simulation_parameters"].attrs
self.domain_left_edge = sp["domain_left_edge"][:]
self.domain_right_edge = sp["domain_right_edge"][:]
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/ramses/data_structures.py
--- a/yt/frontends/ramses/data_structures.py
+++ b/yt/frontends/ramses/data_structures.py
@@ -357,6 +357,8 @@
_hierarchy_class = RAMSESGeometryHandler
_fieldinfo_fallback = RAMSESFieldInfo
_fieldinfo_known = KnownRAMSESFields
+ _particle_mass_name = "ParticleMass"
+ _particle_coordinates_name = "Coordinates"
def __init__(self, filename, data_style='ramses',
fields = None,
@@ -393,11 +395,15 @@
self.conversion_factors["y-velocity"] = vel_u
self.conversion_factors["z-velocity"] = vel_u
# Necessary to get the length units in, which are needed for Mass
+ # We also have to multiply by the boxlength here to scale into our
+ # domain.
self.conversion_factors['mass'] = rho_u * self.parameters['unit_l']**3
def _setup_nounits_units(self):
# Note that unit_l *already* converts to proper!
- unit_l = self.parameters['unit_l']
+ # Also note that unit_l must be multiplied by the boxlen parameter to
+ # ensure we are correctly set up for the current domain.
+ unit_l = self.parameters['unit_l'] * self.parameters['boxlen']
for unit in mpc_conversion.keys():
self.units[unit] = unit_l * mpc_conversion[unit] / mpc_conversion["cm"]
self.units['%sh' % unit] = self.units[unit] * self.hubble_constant
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/ramses/io.py
--- a/yt/frontends/ramses/io.py
+++ b/yt/frontends/ramses/io.py
@@ -98,9 +98,13 @@
f = open(subset.domain.part_fn, "rb")
foffsets = subset.domain.particle_field_offsets
tr = {}
- #for field in sorted(fields, key=lambda a:foffsets[a]):
+ # We do *all* conversion into boxlen here.
+ # This means that no other conversions need to be applied to convert
+ # positions into the same domain as the octs themselves.
for field in fields:
f.seek(foffsets[field])
dt = subset.domain.particle_field_types[field]
tr[field] = fpu.read_vector(f, dt)
+ if field[1].startswith("particle_position"):
+ np.divide(tr[field], subset.domain.pf["boxlen"], tr[field])
return tr
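Since RAMSES writes particle positions in units where the box is boxlen across, while the octs live in a unit cube, an in-place divide is all the conversion requires. A toy illustration (array values invented):

    import numpy as np

    boxlen = 100.0
    pos = np.array([0.0, 25.0, 99.9])   # raw positions in [0, boxlen)
    np.divide(pos, boxlen, pos)         # in-place, as in the hunk above
    # pos is now [0.0, 0.25, 0.999], matching the oct domain [0, 1)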
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/setup.py
--- a/yt/frontends/setup.py
+++ b/yt/frontends/setup.py
@@ -24,4 +24,9 @@
config.add_subpackage("sph")
config.add_subpackage("stream")
config.add_subpackage("tiger")
+ config.add_subpackage("flash/tests")
+ config.add_subpackage("enzo/tests")
+ config.add_subpackage("orion/tests")
+ config.add_subpackage("stream/tests")
+ config.add_subpackage("chombo/tests")
return config
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -129,6 +129,8 @@
_file_class = GadgetBinaryFile
_fieldinfo_fallback = GadgetFieldInfo
_fieldinfo_known = KnownGadgetFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
_header_spec = (('Npart', 6, 'i'),
('Massarr', 6, 'd'),
('Time', 1, 'd'),
@@ -258,6 +260,8 @@
_file_class = ParticleFile
_fieldinfo_fallback = OWLSFieldInfo # For now we have separate from Gadget
_fieldinfo_known = KnownOWLSFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
_header_spec = None # Override so that there's no confusion
def __init__(self, filename, data_style="OWLS"):
@@ -337,6 +341,8 @@
_file_class = TipsyFile
_fieldinfo_fallback = TipsyFieldInfo
_fieldinfo_known = KnownTipsyFields
+ _particle_mass_name = "Mass"
+ _particle_coordinates_name = "Coordinates"
_header_spec = (('time', 'd'),
('nbodies', 'i'),
('ndim', 'i'),
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -72,7 +72,9 @@
def _AllFields(field, data):
v = []
for ptype in data.pf.particle_types:
- if ptype == "all": continue
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
v.append(data[ptype, fname].copy())
rv = np.concatenate(v, axis=0)
return rv
@@ -82,7 +84,9 @@
def _AllFields(field, data):
v = []
for ptype in data.pf.particle_types:
- if ptype == "all": continue
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
v.append(data[ptype, fname][:,axi])
rv = np.concatenate(v, axis=0)
return rv
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -34,6 +34,8 @@
AMRGridPatch
from yt.geometry.grid_geometry_handler import \
GridGeometryHandler
+from yt.geometry.particle_geometry_handler import \
+ ParticleGeometryHandler
from yt.data_objects.static_output import \
StaticOutput
from yt.utilities.logger import ytLogger as mylog
@@ -47,6 +49,8 @@
mpc_conversion, sec_conversion
from yt.utilities.flagging_methods import \
FlaggingGrid
+from yt.frontends.sph.data_structures import \
+ ParticleFile
from .fields import \
StreamFieldInfo, \
@@ -704,3 +708,122 @@
assign_particle_data(pf, pdata)
return pf
+
+class StreamParticleGeometryHandler(ParticleGeometryHandler):
+
+
+ def __init__(self, pf, data_style = None):
+ self.stream_handler = pf.stream_handler
+ super(StreamParticleGeometryHandler, self).__init__(pf, data_style)
+
+ def _setup_data_io(self):
+ if self.stream_handler.io is not None:
+ self.io = self.stream_handler.io
+ else:
+ self.io = io_registry[self.data_style](self.stream_handler)
+
+class StreamParticleFile(ParticleFile):
+ pass
+
+class StreamParticlesStaticOutput(StreamStaticOutput):
+ _hierarchy_class = StreamParticleGeometryHandler
+ _file_class = StreamParticleFile
+ _fieldinfo_fallback = StreamFieldInfo
+ _fieldinfo_known = KnownStreamFields
+ _data_style = "stream_particles"
+ file_count = 1
+ filename_template = "stream_file"
+
+def load_particles(data, sim_unit_to_cm, bbox=None,
+ sim_time=0.0, periodicity=(True, True, True)):
+ r"""Load a set of particles into yt as a
+    :class:`~yt.frontends.stream.data_structures.StreamParticlesStaticOutput`.
+
+    This should allow a collection of particle data to be loaded directly into
+    yt and analyzed as any other dataset would be. This comes with several caveats:
+ * Units will be incorrect unless the data has already been converted to
+ cgs.
+ * Some functions may behave oddly, and parallelism will be
+ disappointing or non-existent in most cases.
+
+ This will initialize an Octree of data. Note that fluid fields will not
+ work yet, or possibly ever.
+
+ Parameters
+ ----------
+ data : dict
+ This is a dict of numpy arrays, where the keys are the field names.
+        Particle positions must be named "particle_position_x",
+ "particle_position_y", "particle_position_z".
+ sim_unit_to_cm : float
+ Conversion factor from simulation units to centimeters
+ bbox : array_like (xdim:zdim, LE:RE), optional
+        Size of the computational domain in units of sim_unit_to_cm
+ sim_time : float, optional
+ The simulation time in seconds
+ periodicity : tuple of booleans
+ Determines whether the data will be treated as periodic along
+ each axis
+
+ Examples
+ --------
+
+ >>> pos = [np.random.random(128*128*128) for i in range(3)]
+ >>> data = dict(particle_position_x = pos[0],
+ ... particle_position_y = pos[1],
+ ... particle_position_z = pos[2])
+ >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]])
+ >>> pf = load_particles(data, 3.08e24, bbox=bbox)
+
+ """
+
+ domain_dimensions = np.ones(3, "int32") * 2
+ nprocs = 1
+ if bbox is None:
+ bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], 'float64')
+ domain_left_edge = np.array(bbox[:, 0], 'float64')
+ domain_right_edge = np.array(bbox[:, 1], 'float64')
+ grid_levels = np.zeros(nprocs, dtype='int32').reshape((nprocs,1))
+
+ sfh = StreamDictFieldHandler()
+
+ particle_types = set_particle_types(data)
+
+ sfh.update({'stream_file':data})
+ grid_left_edges = domain_left_edge
+ grid_right_edges = domain_right_edge
+ grid_dimensions = domain_dimensions.reshape(nprocs,3).astype("int32")
+
+ # I'm not sure we need any of this.
+ handler = StreamHandler(
+ grid_left_edges,
+ grid_right_edges,
+ grid_dimensions,
+ grid_levels,
+ -np.ones(nprocs, dtype='int64'),
+ np.zeros(nprocs, dtype='int64').reshape(nprocs,1), # Temporary
+ np.zeros(nprocs).reshape((nprocs,1)),
+ sfh,
+ particle_types=particle_types,
+ periodicity=periodicity
+ )
+
+ handler.name = "ParticleData"
+ handler.domain_left_edge = domain_left_edge
+ handler.domain_right_edge = domain_right_edge
+ handler.refine_by = 2
+ handler.dimensionality = 3
+ handler.domain_dimensions = domain_dimensions
+ handler.simulation_time = sim_time
+ handler.cosmology_simulation = 0
+
+ spf = StreamParticlesStaticOutput(handler)
+ spf.units["cm"] = sim_unit_to_cm
+ spf.units['1'] = 1.0
+ spf.units["unitary"] = 1.0
+ box_in_mpc = sim_unit_to_cm / mpc_conversion['cm']
+ for unit in mpc_conversion.keys():
+ spf.units[unit] = mpc_conversion[unit] * box_in_mpc
+
+ return spf
+
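A hedged continuation of the docstring example, showing the round trip through the stream frontend with the load_particles defined above; everything here follows from the code in this diff except the assumption that the yt-2.x-style pf.h.all_data() entry point applies:

    import numpy as np

    pos = [np.random.random(128**3) for i in range(3)]
    data = dict(particle_position_x = pos[0],
                particle_position_y = pos[1],
                particle_position_z = pos[2])
    pf = load_particles(data, 3.08e24)
    dd = pf.h.all_data()
    # Fields come back keyed by the "all" particle type, per
    # _identify_fields in the stream IO handler.
    x = dd["all", "particle_position_x"]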
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/stream/fields.py
--- a/yt/frontends/stream/fields.py
+++ b/yt/frontends/stream/fields.py
@@ -34,6 +34,9 @@
ValidateSpatial, \
ValidateGridType
import yt.data_objects.universal_fields
+from yt.data_objects.particle_fields import \
+ particle_deposition_functions, \
+ particle_vector_functions
KnownStreamFields = FieldInfoContainer()
add_stream_field = KnownStreamFields.add_field
@@ -69,3 +72,9 @@
add_field(("all", "ParticleMass"), function = TranslationFunc("particle_mass"),
particle_type=True)
+
+particle_vector_functions("all", ["particle_position_%s" % ax for ax in 'xyz'],
+ ["particle_velocity_%s" % ax for ax in 'xyz'],
+ StreamFieldInfo)
+particle_deposition_functions("all", "Coordinates", "ParticleMass",
+ StreamFieldInfo)
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/frontends/stream/io.py
--- a/yt/frontends/stream/io.py
+++ b/yt/frontends/stream/io.py
@@ -32,6 +32,8 @@
from yt.utilities.io_handler import \
BaseIOHandler, _axis_ids
from yt.utilities.logger import ytLogger as mylog
+from yt.utilities.lib.geometry_utils import compute_morton
+from yt.utilities.exceptions import *
class IOHandlerStream(BaseIOHandler):
@@ -127,3 +129,83 @@
def _read_exception(self):
return KeyError
+class StreamParticleIOHandler(BaseIOHandler):
+
+ _data_style = "stream_particles"
+
+ def __init__(self, stream_handler):
+ self.fields = stream_handler.fields
+ BaseIOHandler.__init__(self)
+
+ def _read_particle_selection(self, chunks, selector, fields):
+ rv = {}
+ # We first need a set of masks for each particle type
+ ptf = defaultdict(list)
+ psize = defaultdict(lambda: 0)
+ chunks = list(chunks)
+ for ftype, fname in fields:
+ ptf[ftype].append(fname)
+ # For this type of file, we actually have something slightly different.
+ # We are given a list of ParticleDataChunks, which is composed of
+ # individual ParticleOctreeSubsets. The data_files attribute on these
+ # may in fact overlap. So we will iterate over a union of all the
+ # data_files.
+ data_files = set([])
+ for chunk in chunks:
+ for obj in chunk.objs:
+ data_files.update(obj.data_files)
+ for data_file in data_files:
+ f = self.fields[data_file.filename]
+            # This double-reads: positions are scanned once here to
+            # count the selected particles, and again below to fill.
+ for ptype, field_list in sorted(ptf.items()):
+ assert(ptype == "all")
+ psize[ptype] += selector.count_points(
+ f["particle_position_x"],
+ f["particle_position_y"],
+ f["particle_position_z"])
+ # Now we have all the sizes, and we can allocate
+ ind = {}
+ for field in fields:
+ mylog.debug("Allocating %s values for %s", psize[field[0]], field)
+ rv[field] = np.empty(psize[field[0]], dtype="float64")
+ ind[field] = 0
+ for data_file in data_files:
+ f = self.fields[data_file.filename]
+ for ptype, field_list in sorted(ptf.items()):
+ assert(ptype == "all")
+ mask = selector.select_points(
+ f["particle_position_x"],
+ f["particle_position_y"],
+ f["particle_position_z"])
+ if mask is None: continue
+ for field in field_list:
+ data = f[field][mask,...]
+ my_ind = ind[ptype, field]
+ mylog.debug("Filling from %s to %s with %s",
+ my_ind, my_ind+data.shape[0], field)
+ rv[ptype, field][my_ind:my_ind + data.shape[0],...] = data
+ ind[ptype, field] += data.shape[0]
+ return rv
+
+ def _initialize_index(self, data_file, regions):
+ # self.fields[g.id][fname] is the pattern here
+ pos = np.column_stack(self.fields[data_file.filename][
+ "particle_position_%s" % ax] for ax in 'xyz')
+ if np.any(pos.min(axis=0) <= data_file.pf.domain_left_edge) or \
+ np.any(pos.max(axis=0) >= data_file.pf.domain_right_edge):
+ raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0),
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ regions.add_data_file(pos, data_file.file_id)
+ morton = compute_morton(
+ pos[:,0], pos[:,1], pos[:,2],
+ data_file.pf.domain_left_edge,
+ data_file.pf.domain_right_edge)
+ return morton
+
+ def _count_particles(self, data_file):
+ npart = self.fields[data_file.filename]["particle_position_x"].size
+ return {'all': npart}
+
+ def _identify_fields(self, data_file):
+ return [ ("all", k) for k in self.fields[data_file.filename].keys()]
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -39,6 +39,8 @@
data_object_registry
from yt.data_objects.field_info_container import \
NullFunc
+from yt.data_objects.particle_fields import \
+ particle_deposition_functions
from yt.utilities.io_handler import io_registry
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.parallel_tools.parallel_analysis_interface import \
@@ -162,8 +164,43 @@
self.parameter_file.field_info[field] = known_fields[field]
def _setup_derived_fields(self):
+ self.derived_field_list = []
+ self.filtered_particle_types = []
+ fc, fac = self._derived_fields_to_check()
+ self._derived_fields_add(fc, fac)
+
+ def _setup_filtered_type(self, filter):
+ if not filter.available(self.derived_field_list):
+ return False
fi = self.parameter_file.field_info
- self.derived_field_list = []
+ fd = self.parameter_file.field_dependencies
+ available = False
+ for fn in self.derived_field_list:
+ if fn[0] == filter.filtered_type:
+ # Now we can add this
+ available = True
+ self.derived_field_list.append(
+ (filter.name, fn[1]))
+ fi[filter.name, fn[1]] = filter.wrap_func(fn, fi[fn])
+ # Now we append the dependencies
+ fd[filter.name, fn[1]] = fd[fn]
+ if available:
+ self.parameter_file.particle_types += (filter.name,)
+ self.filtered_particle_types.append(filter.name)
+ self._setup_particle_fields(filter.name, True)
+ return available
+
+ def _setup_particle_fields(self, ptype, filtered = False):
+ pf = self.parameter_file
+ pmass = self.parameter_file._particle_mass_name
+ pcoord = self.parameter_file._particle_coordinates_name
+ if pmass is None or pcoord is None: return
+ df = particle_deposition_functions(ptype,
+ pcoord, pmass, self.parameter_file.field_info)
+ self._derived_fields_add(df)
+
+ def _derived_fields_to_check(self):
+ fi = self.parameter_file.field_info
# First we construct our list of fields to check
fields_to_check = []
fields_to_allcheck = []
@@ -187,6 +224,15 @@
new_fields.append(new_fi.name)
fields_to_check += new_fields
fields_to_allcheck.append(field)
+ return fields_to_check, fields_to_allcheck
+
+ def _derived_fields_add(self, fields_to_check = None,
+ fields_to_allcheck = None):
+ if fields_to_check is None:
+ fields_to_check = []
+ if fields_to_allcheck is None:
+ fields_to_allcheck = []
+ fi = self.parameter_file.field_info
for field in fields_to_check:
try:
fd = fi[field].get_dependencies(pf = self.parameter_file)
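_setup_filtered_type only assumes a small interface on the filter object: a name for the new particle type, the filtered_type it derives from, an available() test against the derived field list, and wrap_func() to re-key each source field. A minimal sketch of an object satisfying that interface (illustrative only; this is not yt's actual filter class, and the real wrap_func receives and returns field-info objects):

    class ToyFilter(object):
        name = "young_stars"        # registered as a new particle type
        filtered_type = "stars"     # existing type the filter selects from

        def available(self, derived_field_list):
            # Usable once every field the filter needs exists.
            return (self.filtered_type, "particle_age") in derived_field_list

        def wrap_func(self, field_name, field_info):
            # Return a field definition that reads the source field
            # through this filter's mask; the masking is elided here.
            return field_info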
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -271,15 +271,15 @@
# per cell, M_k, and Q_k and also the number of particles
# deposited into each one
# the M_k term
- self.omk= np.zeros(self.nvals, dtype="float64")
+ self.omk= np.zeros(self.nvals, dtype="float64", order='F')
cdef np.ndarray omkarr= self.omk
self.mk= <np.float64_t*> omkarr.data
# the Q_k term
- self.oqk= np.zeros(self.nvals, dtype="float64")
+ self.oqk= np.zeros(self.nvals, dtype="float64", order='F')
cdef np.ndarray oqkarr= self.oqk
self.qk= <np.float64_t*> oqkarr.data
# particle count
- self.oi = np.zeros(self.nvals, dtype="float64")
+ self.oi = np.zeros(self.nvals, dtype="float64", order='F')
cdef np.ndarray oiarr = self.oi
self.i = <np.float64_t*> oiarr.data
@@ -368,11 +368,11 @@
cdef np.float64_t *w
cdef public object ow
def initialize(self):
- self.owf = np.zeros(self.nvals, dtype='float64')
+ self.owf = np.zeros(self.nvals, dtype='float64', order='F')
cdef np.ndarray wfarr = self.owf
self.wf = <np.float64_t*> wfarr.data
- self.ow = np.zeros(self.nvals, dtype='float64')
+ self.ow = np.zeros(self.nvals, dtype='float64', order='F')
cdef np.ndarray warr = self.ow
self.w = <np.float64_t*> warr.data
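The order='F' change affects only memory layout, not values: the first axis becomes the fastest-varying one, which is the layout the octree deposition consumers expect (compare the np.asfortranarray call added in octree_subset.py above). Concretely:

    import numpy as np

    a_c = np.zeros((2, 3), dtype="float64")             # C order (default)
    a_f = np.zeros((2, 3), dtype="float64", order="F")  # Fortran order

    assert a_c.flags["C_CONTIGUOUS"] and a_f.flags["F_CONTIGUOUS"]
    # float64 is 8 bytes: C order strides over rows, F order over columns.
    assert a_c.strides == (24, 8)
    assert a_f.strides == (8, 16)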
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/startup_tasks.py
--- a/yt/startup_tasks.py
+++ b/yt/startup_tasks.py
@@ -98,7 +98,17 @@
if param == "loglevel": # special case
mylog.setLevel(int(val))
-parser = argparse.ArgumentParser(description = 'yt command line arguments')
+class YTParser(argparse.ArgumentParser):
+ def error(self, message):
+ """error(message: string)
+
+ Prints a help message that is more detailed than the argparse default
+ and then exits.
+ """
+ self.print_help(sys.stderr)
+ self.exit(2, '%s: error: %s\n' % (self.prog, message))
+
+parser = YTParser(description = 'yt command line arguments')
parser.add_argument("--config", action=SetConfigOption,
help = "Set configuration option, in the form param=value")
parser.add_argument("--paste", action=SetExceptionHandling,
diff -r 9da7774e9b4ff95ca6667f4afc387e410a4ac9b9 -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d yt/utilities/amr_kdtree/amr_kdtools.py
--- a/yt/utilities/amr_kdtree/amr_kdtools.py
+++ b/yt/utilities/amr_kdtree/amr_kdtools.py
@@ -1,5 +1,5 @@
"""
-AMR kD-Tree Tools
+AMR kD-Tree Tools
Authors: Samuel Skillman <samskillman at gmail.com>
Affiliation: University of Colorado at Boulder
@@ -25,435 +25,10 @@
"""
import numpy as np
from yt.funcs import *
-from yt.utilities.lib import kdtree_get_choices
-
-def _lchild_id(node_id): return (node_id<<1)
-def _rchild_id(node_id): return (node_id<<1) + 1
-def _parent_id(node_id): return (node_id-1) >> 1
-
-class Node(object):
- def __init__(self, parent, left, right,
- left_edge, right_edge, grid_id, node_id):
- self.left = left
- self.right = right
- self.left_edge = left_edge
- self.right_edge = right_edge
- self.grid = grid_id
- self.parent = parent
- self.id = node_id
- self.data = None
- self.split = None
-
-class Split(object):
- def __init__(self, dim, pos):
- self.dim = dim
- self.pos = pos
-
-def should_i_build(node, rank, size):
- if (node.id < size) or (node.id >= 2*size):
- return True
- elif node.id - size == rank:
- return True
- else:
- return False
-
-
-def add_grid(node, gle, gre, gid, rank, size):
- if not should_i_build(node, rank, size):
- return
-
- if kd_is_leaf(node):
- insert_grid(node, gle, gre, gid, rank, size)
- else:
- less_id = gle[node.split.dim] < node.split.pos
- if less_id:
- add_grid(node.left, gle, gre,
- gid, rank, size)
-
- greater_id = gre[node.split.dim] > node.split.pos
- if greater_id:
- add_grid(node.right, gle, gre,
- gid, rank, size)
-
-
-def insert_grid(node, gle, gre, grid_id, rank, size):
- if not should_i_build(node, rank, size):
- return
-
- # If we should continue to split based on parallelism, do so!
- if should_i_split(node, rank, size):
- geo_split(node, gle, gre, grid_id, rank, size)
- return
-
- if np.all(gle <= node.left_edge) and \
- np.all(gre >= node.right_edge):
- node.grid = grid_id
- assert(node.grid is not None)
- return
-
- # Split the grid
- check = split_grid(node, gle, gre, grid_id, rank, size)
- # If check is -1, then we have found a place where there are no choices.
- # Exit out and set the node to None.
- if check == -1:
- node.grid = None
- return
-
-
-def add_grids(node, gles, gres, gids, rank, size):
- if not should_i_build(node, rank, size):
- return
-
- if kd_is_leaf(node):
- insert_grids(node, gles, gres, gids, rank, size)
- else:
- less_ids = gles[:,node.split.dim] < node.split.pos
- if len(less_ids) > 0:
- add_grids(node.left, gles[less_ids], gres[less_ids],
- gids[less_ids], rank, size)
-
- greater_ids = gres[:,node.split.dim] > node.split.pos
- if len(greater_ids) > 0:
- add_grids(node.right, gles[greater_ids], gres[greater_ids],
- gids[greater_ids], rank, size)
-
-
-def should_i_split(node, rank, size):
- return node.id < size
-
-
-def geo_split_grid(node, gle, gre, grid_id, rank, size):
- big_dim = np.argmax(gre-gle)
- new_pos = (gre[big_dim] + gle[big_dim])/2.
- old_gre = gre.copy()
- new_gle = gle.copy()
- new_gle[big_dim] = new_pos
- gre[big_dim] = new_pos
-
- split = Split(big_dim, new_pos)
-
- # Create a Split
- divide(node, split)
-
- # Populate Left Node
- #print 'Inserting left node', node.left_edge, node.right_edge
- insert_grid(node.left, gle, gre,
- grid_id, rank, size)
-
- # Populate Right Node
- #print 'Inserting right node', node.left_edge, node.right_edge
- insert_grid(node.right, new_gle, old_gre,
- grid_id, rank, size)
- return
-
-
-def geo_split(node, gles, gres, grid_ids, rank, size):
- big_dim = np.argmax(gres[0]-gles[0])
- new_pos = (gres[0][big_dim] + gles[0][big_dim])/2.
- old_gre = gres[0].copy()
- new_gle = gles[0].copy()
- new_gle[big_dim] = new_pos
- gres[0][big_dim] = new_pos
- gles = np.append(gles, np.array([new_gle]), axis=0)
- gres = np.append(gres, np.array([old_gre]), axis=0)
- grid_ids = np.append(grid_ids, grid_ids, axis=0)
-
- split = Split(big_dim, new_pos)
-
- # Create a Split
- divide(node, split)
-
- # Populate Left Node
- #print 'Inserting left node', node.left_edge, node.right_edge
- insert_grids(node.left, gles[:1], gres[:1],
- grid_ids[:1], rank, size)
-
- # Populate Right Node
- #print 'Inserting right node', node.left_edge, node.right_edge
- insert_grids(node.right, gles[1:], gres[1:],
- grid_ids[1:], rank, size)
- return
-
-def insert_grids(node, gles, gres, grid_ids, rank, size):
- if not should_i_build(node, rank, size) or grid_ids.size == 0:
- return
-
- if len(grid_ids) == 1:
- # If we should continue to split based on parallelism, do so!
- if should_i_split(node, rank, size):
- geo_split(node, gles, gres, grid_ids, rank, size)
- return
-
- if np.all(gles[0] <= node.left_edge) and \
- np.all(gres[0] >= node.right_edge):
- node.grid = grid_ids[0]
- assert(node.grid is not None)
- return
-
- # Split the grids
- check = split_grids(node, gles, gres, grid_ids, rank, size)
- # If check is -1, then we have found a place where there are no choices.
- # Exit out and set the node to None.
- if check == -1:
- node.grid = None
- return
-
-def split_grid(node, gle, gre, grid_id, rank, size):
- # Find a Split
- data = np.array([(gle[:], gre[:])], copy=False)
- best_dim, split_pos, less_id, greater_id = \
- kdtree_get_choices(data, node.left_edge, node.right_edge)
-
- # If best_dim is -1, then we have found a place where there are no choices.
- # Exit out and set the node to None.
- if best_dim == -1:
- return -1
-
- split = Split(best_dim, split_pos)
-
- del data, best_dim, split_pos
-
- # Create a Split
- divide(node, split)
-
- # Populate Left Node
- #print 'Inserting left node', node.left_edge, node.right_edge
- if less_id:
- insert_grid(node.left, gle, gre,
- grid_id, rank, size)
-
- # Populate Right Node
- #print 'Inserting right node', node.left_edge, node.right_edge
- if greater_id:
- insert_grid(node.right, gle, gre,
- grid_id, rank, size)
-
- return
-
-
-def split_grids(node, gles, gres, grid_ids, rank, size):
- # Find a Split
- data = np.array([(gles[i,:], gres[i,:]) for i in
- xrange(grid_ids.shape[0])], copy=False)
- best_dim, split_pos, less_ids, greater_ids = \
- kdtree_get_choices(data, node.left_edge, node.right_edge)
-
- # If best_dim is -1, then we have found a place where there are no choices.
- # Exit out and set the node to None.
- if best_dim == -1:
- return -1
-
- split = Split(best_dim, split_pos)
-
- del data, best_dim, split_pos
-
- # Create a Split
- divide(node, split)
-
- # Populate Left Node
- #print 'Inserting left node', node.left_edge, node.right_edge
- insert_grids(node.left, gles[less_ids], gres[less_ids],
- grid_ids[less_ids], rank, size)
-
- # Populate Right Node
- #print 'Inserting right node', node.left_edge, node.right_edge
- insert_grids(node.right, gles[greater_ids], gres[greater_ids],
- grid_ids[greater_ids], rank, size)
-
- return
-
-def new_right(Node, split):
- new_right = Node.right_edge.copy()
- new_right[split.dim] = split.pos
- return new_right
-
-def new_left(Node, split):
- new_left = Node.left_edge.copy()
- new_left[split.dim] = split.pos
- return new_left
-
-def divide(node, split):
- # Create a Split
- node.split = split
- node.left = Node(node, None, None,
- node.left_edge, new_right(node, split), node.grid,
- _lchild_id(node.id))
- node.right = Node(node, None, None,
- new_left(node, split), node.right_edge, node.grid,
- _rchild_id(node.id))
- return
-
-def kd_sum_volume(node):
- if (node.left is None) and (node.right is None):
- if node.grid is None:
- return 0.0
- return np.prod(node.right_edge - node.left_edge)
- else:
- return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-def kd_sum_cells(node):
- if (node.left is None) and (node.right is None):
- if node.grid is None:
- return 0.0
- return np.prod(node.right_edge - node.left_edge)
- else:
- return kd_sum_volume(node.left) + kd_sum_volume(node.right)
-
-
-def kd_node_check(node):
- assert (node.left is None) == (node.right is None)
- if (node.left is None) and (node.right is None):
- if node.grid is not None:
- return np.prod(node.right_edge - node.left_edge)
- else: return 0.0
- else:
- return kd_node_check(node.left)+kd_node_check(node.right)
-
-def kd_is_leaf(node):
- no_l_child = node.left is None
- no_r_child = node.right is None
- assert no_l_child == no_r_child
- return no_l_child
-
-def step_depth(current, previous):
- '''
- Takes a single step in the depth-first traversal
- '''
- if kd_is_leaf(current): # At a leaf, move back up
- previous = current
- current = current.parent
-
- elif current.parent is previous: # Moving down, go left first
- previous = current
- if current.left is not None:
- current = current.left
- elif current.right is not None:
- current = current.right
- else:
- current = current.parent
-
- elif current.left is previous: # Moving up from left, go right
- previous = current
- if current.right is not None:
- current = current.right
- else:
- current = current.parent
-
- elif current.right is previous: # Moving up from right child, move up
- previous = current
- current = current.parent
-
- return current, previous
-
-def depth_traverse(tree, max_node=None):
- '''
- Yields a depth-first traversal of the kd tree always going to
- the left child before the right.
- '''
- current = tree.trunk
- previous = None
- if max_node is None:
- max_node = np.inf
- while current is not None:
- yield current
- current, previous = step_depth(current, previous)
- if current is None: break
- if current.id >= max_node:
- current = current.parent
- previous = current.right
-
-def depth_first_touch(tree, max_node=None):
- '''
- Yields a depth-first traversal of the kd tree always going to
- the left child before the right.
- '''
- current = tree.trunk
- previous = None
- if max_node is None:
- max_node = np.inf
- while current is not None:
- if previous is None or previous.parent != current:
- yield current
- current, previous = step_depth(current, previous)
- if current is None: break
- if current.id >= max_node:
- current = current.parent
- previous = current.right
-
-def breadth_traverse(tree):
- '''
- Yields a breadth-first traversal of the kd tree always going to
- the left child before the right.
- '''
- current = tree.trunk
- previous = None
- while current is not None:
- yield current
- current, previous = step_depth(current, previous)
-
-
-def viewpoint_traverse(tree, viewpoint):
- '''
- Yields a viewpoint dependent traversal of the kd-tree. Starts
- with nodes furthest away from viewpoint.
- '''
-
- current = tree.trunk
- previous = None
- while current is not None:
- yield current
- current, previous = step_viewpoint(current, previous, viewpoint)
-
-def step_viewpoint(current, previous, viewpoint):
- '''
- Takes a single step in the viewpoint based traversal. Always
- goes to the node furthest away from viewpoint first.
- '''
- if kd_is_leaf(current): # At a leaf, move back up
- previous = current
- current = current.parent
- elif current.split.dim is None: # This is a dead node
- previous = current
- current = current.parent
-
- elif current.parent is previous: # Moving down
- previous = current
- if viewpoint[current.split.dim] <= current.split.pos:
- if current.right is not None:
- current = current.right
- else:
- previous = current.right
- else:
- if current.left is not None:
- current = current.left
- else:
- previous = current.left
-
- elif current.right is previous: # Moving up from right
- previous = current
- if viewpoint[current.split.dim] <= current.split.pos:
- if current.left is not None:
- current = current.left
- else:
- current = current.parent
- else:
- current = current.parent
-
- elif current.left is previous: # Moving up from left child
- previous = current
- if viewpoint[current.split.dim] > current.split.pos:
- if current.right is not None:
- current = current.right
- else:
- current = current.parent
- else:
- current = current.parent
-
- return current, previous
def receive_and_reduce(comm, incoming_rank, image, add_to_front):
- mylog.debug( 'Receiving image from %04i' % incoming_rank)
+ mylog.debug('Receiving image from %04i' % incoming_rank)
#mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner))
arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape(
(image.shape[0], image.shape[1], image.shape[2]))
@@ -470,36 +45,24 @@
np.add(image, front, image)
return image
- ta = 1.0 - front[:,:,3]
+ ta = 1.0 - front[:, :, 3]
np.maximum(ta, 0.0, ta)
# This now does the following calculation, but in a memory
# conservative fashion
# image[:,:,i ] = front[:,:,i] + ta*back[:,:,i]
image = back.copy()
for i in range(4):
- np.multiply(image[:,:,i], ta, image[:,:,i])
+ np.multiply(image[:, :, i], ta, image[:, :, i])
np.add(image, front, image)
return image
+
def send_to_parent(comm, outgoing_rank, image):
- mylog.debug( 'Sending image to %04i' % outgoing_rank)
+ mylog.debug('Sending image to %04i' % outgoing_rank)
comm.send_array(image, outgoing_rank, tag=comm.rank)
+
def scatter_image(comm, root, image):
- mylog.debug( 'Scattering from %04i' % root)
+ mylog.debug('Scattering from %04i' % root)
image = comm.mpi_bcast(image, root=root)
return image
-
-def find_node(node, pos):
- """
- Find the AMRKDTree node enclosing a position
- """
- assert(np.all(node.left_edge <= pos))
- assert(np.all(node.right_edge > pos))
- while not kd_is_leaf(node):
- if pos[node.split.dim] < node.split.pos:
- node = node.left
- else:
- node = node.right
- return node
-
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt-3.0/commits/cc228d860feb/
Changeset: cc228d860feb
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-19 01:14:46
Summary: fixing a closure problem
Affected #: 2 files
diff -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d -r cc228d860febdaa12f664a34af2fba312066359f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -231,19 +231,21 @@
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
-def _StarAngularMomentumVector(data):
+def _StarAngularMomentumVector(data, ftype=None):
"""
This function returns the mass-weighted average angular momentum vector
for stars.
"""
- is_star = data["creation_time"] > 0
- star_mass = data["ParticleMassMsun"][is_star]
- sLx = data["ParticleSpecificAngularMomentumX"][is_star]
- sLy = data["ParticleSpecificAngularMomentumY"][is_star]
- sLz = data["ParticleSpecificAngularMomentumZ"][is_star]
- amx = sLx * star_mass
- amy = sLy * star_mass
- amz = sLz * star_mass
+    if ftype is None:
+        is_star = data["creation_time"] > 0
+        star_mass = data["ParticleMassMsun"][is_star]
+    else:
+        is_star = Ellipsis
+        star_mass = data[ftype, "ParticleMassMsun"]
+    key = "ParticleSpecificAngularMomentum%s"
+    j_mag = np.ones(3, dtype='f8')
+    for i, ax in enumerate("XYZ"):
+        # Tuple-indexed access when an explicit particle type is given.
+        sj = data[key % ax] if ftype is None else data[ftype, key % ax]
+        j_mag[i] = (sj[is_star] * star_mass).sum(dtype=np.float64)
     return [j_mag]
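In physical terms the quantity above is the mass-weighted total angular momentum of the star particles, J = sum_i m_i (r_i x v_i), assembled componentwise from the per-particle specific angular momentum fields. The same computation in plain numpy, with invented inputs:

    import numpy as np

    def star_angular_momentum(mass, pos, vel):
        # pos, vel: (N, 3), relative to the chosen center / bulk velocity.
        specific_j = np.cross(pos, vel)   # r x v per particle
        return (mass[:, None] * specific_j).sum(axis=0, dtype=np.float64)

    m = np.array([1.0, 2.0])
    r = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    v = np.array([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
    # particle 0 contributes (0, 0, 1); particle 1 contributes 2*(1, 0, 0)
    assert (star_angular_momentum(m, r, v) == [2.0, 0.0, 1.0]).all()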
diff -r d36695e300026f1d26b8b8dd2e04a4a25a02eb8d -r cc228d860febdaa12f664a34af2fba312066359f yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -715,7 +715,7 @@
xv = data["particle_velocity_x"] - bv[0]
yv = data["particle_velocity_y"] - bv[1]
return xv*y - yv*x
-for ax in 'XYZ':
+def with_axis(ax):
n = "ParticleSpecificAngularMomentum%s" % ax
add_field(n, function=eval("_%s" % n), particle_type=True,
convert_function=_convertSpecificAngularMomentum,
@@ -723,6 +723,8 @@
add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
convert_function=_convertSpecificAngularMomentumKMSMPC,
units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+for ax in 'XYZ':
+ with_axis(ax)
def _ParticleAngularMomentum(field, data):
return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
https://bitbucket.org/yt_analysis/yt-3.0/commits/6359c7eceb54/
Changeset: 6359c7eceb54
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-19 01:30:17
Summary: fixed infuriating closures
Affected #: 1 file
diff -r cc228d860febdaa12f664a34af2fba312066359f -r 6359c7eceb544eab7e930efc8b365fcd35c4b5b3 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -686,6 +686,7 @@
# units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
def _ParticleSpecificAngularMomentumX(field, data):
+ print 'x'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
@@ -695,7 +696,15 @@
yv = data["particle_velocity_y"] - bv[1]
zv = data["particle_velocity_z"] - bv[2]
return yv*z - zv*y
+add_field("ParticleSpecificAngularMomentumX",
+ function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumY(field, data):
+ print 'y'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
@@ -705,7 +714,15 @@
xv = data["particle_velocity_x"] - bv[0]
zv = data["particle_velocity_z"] - bv[2]
return -(xv*z - zv*x)
+add_field("ParticleSpecificAngularMomentumY",
+ function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumZ(field, data):
+ print 'z'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
@@ -715,16 +732,13 @@
xv = data["particle_velocity_x"] - bv[0]
yv = data["particle_velocity_y"] - bv[1]
return xv*y - yv*x
-def with_axis(ax):
- n = "ParticleSpecificAngularMomentum%s" % ax
- add_field(n, function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentum,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
- add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentumKMSMPC,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
-for ax in 'XYZ':
- with_axis(ax)
+add_field("ParticleSpecificAngularMomentumZ",
+ function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleAngularMomentum(field, data):
return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
@@ -744,12 +758,12 @@
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
https://bitbucket.org/yt_analysis/yt-3.0/commits/b6d6b9082ebb/
Changeset: b6d6b9082ebb
Branch: yt-3.0
User: juxtaposicion
Date: 2013-07-19 01:33:59
Summary: removed prints
Affected #: 1 file
diff -r 6359c7eceb544eab7e930efc8b365fcd35c4b5b3 -r b6d6b9082ebb74b645ee43fdddd99b866f448459 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -686,7 +686,6 @@
# units=r"\rm{km}\rm{Mpc}/\rm{s}", validators=[ValidateParameter('center')])
def _ParticleSpecificAngularMomentumX(field, data):
- print 'x'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
@@ -704,7 +703,6 @@
convert_function=_convertSpecificAngularMomentumKMSMPC,
units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumY(field, data):
- print 'y'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
@@ -722,7 +720,6 @@
convert_function=_convertSpecificAngularMomentumKMSMPC,
units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumZ(field, data):
- print 'z'
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
else: bv = np.zeros(3, dtype='float64')
https://bitbucket.org/yt_analysis/yt-3.0/commits/f024c47e7670/
Changeset: f024c47e7670
Branch: yt-3.0
User: juxtaposicion
Date: 2013-08-22 07:42:49
Summary: merge
Affected #: 48 files
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -12,13 +12,16 @@
yt/frontends/sph/smoothing_kernel.c
yt/geometry/fake_octree.c
yt/geometry/oct_container.c
+yt/geometry/oct_visitors.c
yt/geometry/particle_deposit.c
+yt/geometry/particle_oct_container.c
yt/geometry/selection_routines.c
yt/utilities/amr_utils.c
yt/utilities/kdtree/forthonf2c.h
yt/utilities/libconfig_wrapper.c
yt/utilities/spatial/ckdtree.c
yt/utilities/lib/alt_ray_tracers.c
+yt/utilities/lib/amr_kdtools.c
yt/utilities/lib/CICDeposit.c
yt/utilities/lib/ContourFinding.c
yt/utilities/lib/DepthFirstOctree.c
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/analysis_modules/halo_profiler/standard_analysis.py
--- a/yt/analysis_modules/halo_profiler/standard_analysis.py
+++ b/yt/analysis_modules/halo_profiler/standard_analysis.py
@@ -30,6 +30,7 @@
class StandardRadialAnalysis(object):
def __init__(self, pf, center, radius, n_bins = 128, inner_radius = None):
+ raise NotImplementedError # see TODO
self.pf = pf
# We actually don't want to replicate the handling of setting the
# center here, so we will pass it to the sphere creator.
@@ -53,6 +54,7 @@
prof = BinnedProfile1D(self.obj, self.n_bins, "Radius",
self.inner_radius, self.outer_radius)
by_weights = defaultdict(list)
+ # TODO: analysis_field_list is undefined
for fspec in analysis_field_list:
if isinstance(fspec, types.TupleType) and len(fspec) == 2:
field, weight = fspec
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/analyzer_objects.py
--- a/yt/data_objects/analyzer_objects.py
+++ b/yt/data_objects/analyzer_objects.py
@@ -80,7 +80,7 @@
def eval(self, pf):
slc = self.SlicePlot(pf, self.axis, self.field, center = self.center)
- return pc.save()
+ return slc.save()
class QuantityProxy(AnalysisTask):
_params = None
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -36,6 +36,7 @@
import fileinput
from re import finditer
+from yt.config import ytcfg
from yt.funcs import *
from yt.utilities.logger import ytLogger
from .data_containers import \
@@ -703,7 +704,7 @@
new_fields = []
for input_field in level_state.fields:
output_field = np.zeros(output_dims, dtype="float64")
- output_left = self.global_startindex + 0.5
+ output_left = level_state.global_startindex + 0.5
ghost_zone_interpolate(rf, input_field, input_left,
output_field, output_left)
new_fields.append(output_field)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -446,7 +446,7 @@
dd['units'] = self._units
dd['projected_units'] = self._projected_units,
dd['take_log'] = self.take_log
- dd['validators'] = self.validators.copy()
+ dd['validators'] = list(self.validators)
dd['particle_type'] = self.particle_type
dd['vector_field'] = self.vector_field
dd['display_field'] = True
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -489,6 +489,7 @@
op.initialize()
op.process_grid(self, positions, fields)
vals = op.finalize()
+ if vals is None: return
return vals.reshape(self.ActiveDimensions, order="C")
def _get_selector_mask(self, selector):
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -121,6 +121,7 @@
op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
self.domain_id, self._domain_offset)
vals = op.finalize()
+ if vals is None: return
return np.asfortranarray(vals)
def select_icoords(self, dobj):
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -113,6 +113,19 @@
particle_type = True,
units = r"\mathrm{M}_\odot")
+ def particle_mesh_ids(field, data):
+ pos = data[ptype, coord_name]
+ ids = np.zeros(pos.shape[0], dtype="float64") - 1
+ # This is float64 in name only. It will be properly cast inside the
+ # deposit operation.
+ #_ids = ids.view("float64")
+ data.deposit(pos, [ids], method = "mesh_id")
+ return ids
+ registry.add_field((ptype, "mesh_id"),
+ function = particle_mesh_ids,
+ validators = [ValidateSpatial()],
+ particle_type = True)
+
return list(set(registry.keys()).difference(orig))
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -545,6 +545,8 @@
self.total_stuff = source_data.sum()
binned_field = self._get_empty_field()
weight_field = self._get_empty_field()
+ m_field = self._get_empty_field()
+ q_field = self._get_empty_field()
used_field = self._get_empty_field()
mi = args[0]
bin_indices_x = args[1][self.indices].ravel().astype('int64')
@@ -553,8 +555,8 @@
weight_data = weight_data[mi][self.indices]
nx = bin_indices_x.size
#mylog.debug("Binning %s / %s times", source_data.size, nx)
- Bin2DProfile(bin_indices_x, bin_indices_y, weight_data, source_data,
- weight_field, binned_field, used_field)
+ bin_profile2d(bin_indices_x, bin_indices_y, weight_data, source_data,
+ weight_field, binned_field, m_field, q_field, used_field)
if accumulation: # Fix for laziness
if not iterable(accumulation):
raise SyntaxError("Accumulation needs to have length 2")
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -194,7 +194,7 @@
ts = np.abs(ts)
self._dts[grid.id] = dts
self._ts[grid.id] = ts
- self._masks[grid.id] = masks
+ self._masks[grid.id] = mask
return mask
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -98,4 +98,5 @@
if fname.startswith("Overdensity"): continue
if FieldInfo[field].particle_type: continue
for nproc in [1, 4, 8]:
+ test_all_fields.__name__ = "%s_%s" % (field, nproc)
yield TestFieldAccess(field, nproc)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -376,12 +376,6 @@
add_field("DynamicalTime", function=_DynamicalTime,
units=r"\rm{s}")
-def JeansMassMsun(field,data):
- return (MJ_constant *
- ((data["Temperature"]/data["MeanMolecularWeight"])**(1.5)) *
- (data["Density"]**(-0.5)))
-add_field("JeansMassMsun",function=JeansMassMsun,units=r"\rm{Msun}")
-
def _CellMass(field, data):
return data["Density"] * data["CellVolume"]
def _convertCellMassMsun(data):
@@ -619,7 +613,7 @@
def _convertSpecificAngularMomentum(data):
return data.convert("cm")
def _convertSpecificAngularMomentumKMSMPC(data):
- return data.convert("mpc")/1e5
+ return km_per_cm*data.convert("mpc")
def _SpecificAngularMomentumX(field, data):
xv, yv, zv = obtain_velocities(data)
@@ -678,8 +672,6 @@
# function=_ParticleSpecificAngularMomentum, particle_type=True,
# convert_function=_convertSpecificAngularMomentum, vector_field=True,
# units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter('center')])
-def _convertSpecificAngularMomentumKMSMPC(data):
- return km_per_cm*data.convert("mpc")
#add_field("ParticleSpecificAngularMomentumKMSMPC",
# function=_ParticleSpecificAngularMomentum, particle_type=True,
# convert_function=_convertSpecificAngularMomentumKMSMPC, vector_field=True,
@@ -993,7 +985,7 @@
phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
- sphp = get_sph_phi_component(vel, theta, phi, normal)
+ sphp = get_sph_phi_component(vel, phi, normal)
return sphp
add_field("ParticlePhiVelocity", function=_ParticleThetaVelocity,
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/enzo/data_structures.py
--- a/yt/frontends/enzo/data_structures.py
+++ b/yt/frontends/enzo/data_structures.py
@@ -675,7 +675,7 @@
class EnzoHierarchy1D(EnzoHierarchy):
- def _fill_arrays(self, ei, si, LE, RE, npart):
+ def _fill_arrays(self, ei, si, LE, RE, npart, nap):
self.grid_dimensions[:,:1] = ei
self.grid_dimensions[:,:1] -= np.array(si, self.float_type)
self.grid_dimensions += 1
@@ -685,10 +685,12 @@
self.grid_left_edge[:,1:] = 0.0
self.grid_right_edge[:,1:] = 1.0
self.grid_dimensions[:,1:] = 1
+ if nap is not None:
+ raise NotImplementedError
class EnzoHierarchy2D(EnzoHierarchy):
- def _fill_arrays(self, ei, si, LE, RE, npart):
+ def _fill_arrays(self, ei, si, LE, RE, npart, nap):
self.grid_dimensions[:,:2] = ei
self.grid_dimensions[:,:2] -= np.array(si, self.float_type)
self.grid_dimensions += 1
@@ -698,6 +700,8 @@
self.grid_left_edge[:,2] = 0.0
self.grid_right_edge[:,2] = 1.0
self.grid_dimensions[:,2] = 1
+ if nap is not None:
+ raise NotImplementedError
class EnzoStaticOutput(StaticOutput):
"""
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/enzo/io.py
--- a/yt/frontends/enzo/io.py
+++ b/yt/frontends/enzo/io.py
@@ -277,6 +277,33 @@
(grid.id, field)).transpose()
return t
+ def _read_fluid_selection(self, chunks, selector, fields, size):
+ rv = {}
+ # Now we have to do something unpleasant
+ chunks = list(chunks)
+ if selector.__class__.__name__ == "GridSelector":
+ return self._read_grid_chunk(chunks, fields)
+ if any((ftype != "gas" for ftype, fname in fields)):
+ raise NotImplementedError
+ for field in fields:
+ ftype, fname = field
+ fsize = size
+ rv[field] = np.empty(fsize, dtype="float64")
+ ng = sum(len(c.objs) for c in chunks)
+ mylog.debug("Reading %s cells of %s fields in %s grids",
+ size, [f2 for f1, f2 in fields], ng)
+ ind = 0
+ for chunk in chunks:
+ data = self._read_chunk_data(chunk, fields)
+ for g in chunk.objs:
+ for field in fields:
+ ftype, fname = field
+ ds = np.atleast_3d(data[g.id].pop(fname))
+ nd = g.select(selector, ds, rv[field], ind) # caches
+ ind += nd
+ data.pop(g.id)
+ return rv
+
class IOHandlerPacked1D(IOHandlerPackedHDF5):
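The _read_fluid_selection added above follows the allocate-then-fill pattern used throughout the 3.0 IO handlers: size the outputs once up front, then advance a single cursor as each grid's selection lands in place. A generic, self-contained restatement; the chunk objects here are plain dicts standing in for real yt chunks:

    import numpy as np

    def read_selection(chunks, fields, size):
        rv = dict((f, np.empty(size, dtype="float64")) for f in fields)
        ind = 0
        for chunk in chunks:
            for f in fields:
                vals = chunk[f]              # stands in for read + select
                rv[f][ind:ind + vals.size] = vals
            ind += vals.size                 # one cursor shared by all fields
        return rv

    chunks = [{"Density": np.ones(4)}, {"Density": np.zeros(2)}]
    print(read_selection(chunks, ["Density"], 6)["Density"])
    # [ 1.  1.  1.  1.  0.  0.]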
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -28,6 +28,7 @@
import stat
import weakref
import struct
+import glob
from itertools import izip
from yt.utilities.fortran_utils import read_record
@@ -46,6 +47,7 @@
G, \
gravitational_constant_cgs, \
km_per_pc, \
+ cm_per_kpc, \
mass_sun_cgs
from yt.utilities.cosmology import Cosmology
from .fields import \
@@ -107,11 +109,12 @@
mpch = {}
mpch.update(mpc_conversion)
unit_base = self._unit_base or {}
- for unit in mpc_conversion:
- mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
- mpch['%shcm' % unit] = (mpch["%sh" % unit] /
- (1 + self.current_redshift))
- mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
+ if self.cosmological_simulation:
+ for unit in mpc_conversion:
+ mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
+ mpch['%shcm' % unit] = (mpch["%sh" % unit] /
+ (1 + self.current_redshift))
+ mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
# ud == unit destination
# ur == unit registry
for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
@@ -151,7 +154,8 @@
def __init__(self, filename, data_style="gadget_binary",
additional_fields = (),
- unit_base = None):
+ unit_base = None, n_ref = 64):
+ self.n_ref = n_ref
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
@@ -264,10 +268,11 @@
_particle_coordinates_name = "Coordinates"
_header_spec = None # Override so that there's no confusion
- def __init__(self, filename, data_style="OWLS"):
+ def __init__(self, filename, data_style="OWLS", n_ref = 64):
self.storage_filename = None
super(OWLSStaticOutput, self).__init__(filename, data_style,
- unit_base = None)
+ unit_base = None,
+ n_ref = n_ref)
def __repr__(self):
return os.path.basename(self.parameter_filename).split(".")[0]
@@ -357,7 +362,10 @@
domain_left_edge = None,
domain_right_edge = None,
unit_base = None,
- cosmology_parameters = None):
+ cosmology_parameters = None,
+ parameter_file = None,
+ n_ref = 64):
+ self.n_ref = n_ref
self.endian = endian
self.storage_filename = None
if domain_left_edge is None:
@@ -375,6 +383,7 @@
self._unit_base = unit_base or {}
self._cosmology_parameters = cosmology_parameters
+ self._param_file = parameter_file
super(TipsyStaticOutput, self).__init__(filename, data_style)
def __repr__(self):
@@ -382,44 +391,74 @@
def _parse_parameter_file(self):
- # The entries in this header are capitalized and named to match Table 4
- # in the GADGET-2 user guide.
+ # Parse the header of the tipsy file; from it we obtain
+ # the snapshot time and particle counts.
f = open(self.parameter_filename, "rb")
hh = self.endian + "".join(["%s" % (b) for a,b in self._header_spec])
hvals = dict([(a, c) for (a, b), c in zip(self._header_spec,
struct.unpack(hh, f.read(struct.calcsize(hh))))])
+ self.parameters.update(hvals)
self._header_offset = f.tell()
+ # These are always true, for now.
self.dimensionality = 3
self.refine_by = 2
self.parameters["HydroMethod"] = "sph"
+
self.unique_identifier = \
int(os.stat(self.parameter_filename)[stat.ST_CTIME])
- # Set standard values
- # This may not be correct.
+ # Read in parameter file, if available.
+ if self._param_file is None:
+ pfn = glob.glob(os.path.join(self.directory, "*.param"))
+ assert len(pfn) < 2, \
+ "More than one param file is in the data directory"
+ if pfn == []:
+ pfn = None
+ else:
+ pfn = pfn[0]
+ else:
+ pfn = self._param_file
+
+ if pfn is not None:
+ for line in (l.strip() for l in open(pfn)):
+ # skip comment lines and blank lines
+ l = line.strip()
+ if l.startswith('#') or l == '':
+ continue
+ # parse parameters according to tipsy parameter type
+ param, val = (i.strip() for i in line.split('=',1))
+ if param.startswith('n') or param.startswith('i'):
+ val = long(val)
+ elif param.startswith('d'):
+ val = float(val)
+ elif param.startswith('b'):
+ val = bool(float(val))
+ self.parameters[param] = val
+
self.current_time = hvals["time"]
+ self.domain_dimensions = np.ones(3, "int32") * 2
+ if self.parameters.get('bPeriodic', True):
+ self.periodicity = (True, True, True)
+ else:
+ self.periodicity = (False, False, False)
- # NOTE: These are now set in the main initializer.
- #self.domain_left_edge = np.zeros(3, "float64") - 0.5
- #self.domain_right_edge = np.ones(3, "float64") + 0.5
- self.domain_dimensions = np.ones(3, "int32") * 2
- self.periodicity = (True, True, True)
-
- self.cosmological_simulation = 1
-
- cosm = self._cosmology_parameters or {}
- dcosm = dict(current_redshift = 0.0,
- omega_lambda = 0.0,
- omega_matter = 0.0,
- hubble_constant = 1.0)
- for param in ['current_redshift', 'omega_lambda',
- 'omega_matter', 'hubble_constant']:
- pval = cosm.get(param, dcosm[param])
- setattr(self, param, pval)
-
- self.parameters = hvals
+ if self.parameters.get('bComove', True):
+ self.cosmological_simulation = 1
+ cosm = self._cosmology_parameters or {}
+ dcosm = dict(current_redshift = 0.0,
+ omega_lambda = 0.0,
+ omega_matter = 0.0,
+ hubble_constant = 1.0)
+ for param in ['current_redshift', 'omega_lambda',
+ 'omega_matter', 'hubble_constant']:
+ pval = cosm.get(param, dcosm[param])
+ setattr(self, param, pval)
+ else:
+ self.cosmological_simulation = 0.0
+ kpc_unit = self.parameters.get('dKpcUnit', 1.0)
+ self._unit_base['cm'] = 1.0 / (kpc_unit * cm_per_kpc)
self.filename_template = self.parameter_filename
self.file_count = 1
@@ -428,12 +467,17 @@
def _set_units(self):
super(TipsyStaticOutput, self)._set_units()
- DW = (self.domain_right_edge - self.domain_left_edge).max()
- cosmo = Cosmology(self.hubble_constant * 100.0,
- self.omega_matter, self.omega_lambda)
- length_unit = DW * self.units['cm'] # Get it in proper cm
- density_unit = cosmo.CriticalDensity(self.current_redshift)
- mass_unit = density_unit * length_unit**3
+ if self.cosmological_simulation:
+ DW = (self.domain_right_edge - self.domain_left_edge).max()
+ cosmo = Cosmology(self.hubble_constant * 100.0,
+ self.omega_matter, self.omega_lambda)
+ length_unit = DW * self.units['cm'] # Get it in proper cm
+ density_unit = cosmo.CriticalDensity(self.current_redshift)
+ mass_unit = density_unit * length_unit**3
+ else:
+ mass_unit = self.parameters.get('dMsolUnit', 1.0) * mass_sun_cgs
+ length_unit = self.parameters.get('dKpcUnit', 1.0) * cm_per_kpc
+ density_unit = mass_unit / length_unit**3
time_unit = 1.0 / np.sqrt(G*density_unit)
velocity_unit = length_unit / time_unit
self.conversion_factors["velocity"] = velocity_unit
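A note on the unit logic in the hunk above: for non-cosmological Tipsy data the mass and length units now derive from the dMsolUnit and dKpcUnit parameters, and the time and velocity units follow from the implied density unit. A minimal sketch of that derivation, with illustrative parameter values and the CGS constants written out explicitly (both are assumptions of this sketch, not values taken from the changeset):

    import numpy as np

    # CGS constants, as assumed for this sketch
    mass_sun_cgs = 1.989e33    # g
    cm_per_kpc = 3.0857e21     # cm
    G = 6.674e-8               # cm^3 g^-1 s^-2

    # hypothetical .param file values
    dMsolUnit, dKpcUnit = 1.0e10, 50.0

    mass_unit = dMsolUnit * mass_sun_cgs          # g
    length_unit = dKpcUnit * cm_per_kpc           # cm
    density_unit = mass_unit / length_unit**3     # g cm^-3
    time_unit = 1.0 / np.sqrt(G * density_unit)   # s
    velocity_unit = length_unit / time_unit       # cm s^-1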
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -380,6 +380,12 @@
rv[field] = np.empty(size, dtype="float64")
if size == 0: continue
rv[field][:] = vals[field][mask]
+ if field == "Coordinates":
+ eps = np.finfo(rv[field].dtype).eps
+ for i in range(3):
+ rv[field][:,i] = np.clip(rv[field][:,i],
+ self.domain_left_edge[i] + eps,
+ self.domain_right_edge[i] - eps)
return rv
def _read_particle_selection(self, chunks, selector, fields):
@@ -421,6 +427,8 @@
ind = 0
DLE, DRE = pf.domain_left_edge, pf.domain_right_edge
dx = (DRE - DLE) / (2**_ORDER_MAX)
+ self.domain_left_edge = DLE
+ self.domain_right_edge = DRE
with open(data_file.filename, "rb") as f:
f.seek(pf._header_offset)
for iptype, ptype in enumerate(self._ptypes):
@@ -446,9 +454,11 @@
pf.domain_left_edge,
pf.domain_right_edge)
pos = np.empty((pp.size, 3), dtype="float64")
- pos[:,0] = pp["Coordinates"]["x"]
- pos[:,1] = pp["Coordinates"]["y"]
- pos[:,2] = pp["Coordinates"]["z"]
+ for i, ax in enumerate("xyz"):
+ eps = np.finfo(pp["Coordinates"][ax].dtype).eps
+ pos[:,i] = np.clip(pp["Coordinates"][ax],
+ pf.domain_left_edge[i] + eps,
+ pf.domain_right_edge[i] - eps)
regions.add_data_file(pos, data_file.file_id)
morton[ind:ind+c] = compute_morton(
pos[:,0], pos[:,1], pos[:,2],
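The clipping added above nudges any particle sitting exactly on the domain boundary inward by one floating-point epsilon, so the Morton indexing never sees out-of-range coordinates. The same idea in isolation (domain edges and positions are made up for the sketch):

    import numpy as np

    DLE = np.array([0.0, 0.0, 0.0])    # assumed domain left edge
    DRE = np.array([1.0, 1.0, 1.0])    # assumed domain right edge
    pos = np.random.random((100, 3))   # assumed particle positions

    eps = np.finfo(pos.dtype).eps
    for i in range(3):
        # keep every coordinate strictly inside the domain
        pos[:, i] = np.clip(pos[:, i], DLE[i] + eps, DRE[i] - eps)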
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/stream/api.py
--- a/yt/frontends/stream/api.py
+++ b/yt/frontends/stream/api.py
@@ -31,6 +31,7 @@
StreamHandler, \
load_uniform_grid, \
load_amr_grids, \
+ load_particles, \
refine_amr
from .fields import \
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/frontends/stream/data_structures.py
--- a/yt/frontends/stream/data_structures.py
+++ b/yt/frontends/stream/data_structures.py
@@ -733,9 +733,11 @@
_data_style = "stream_particles"
file_count = 1
filename_template = "stream_file"
+ n_ref = 64
def load_particles(data, sim_unit_to_cm, bbox=None,
- sim_time=0.0, periodicity=(True, True, True)):
+ sim_time=0.0, periodicity=(True, True, True),
+ n_ref = 64):
r"""Load a set of particles into yt as a
:class:`~yt.frontends.stream.data_structures.StreamParticleHandler`.
@@ -764,6 +766,9 @@
periodicity : tuple of booleans
Determines whether the data will be treated as periodic along
each axis
+ n_ref : int
+ The number of particles above which an oct used for indexing the
+ particles is refined.
Examples
--------
@@ -818,6 +823,7 @@
handler.cosmology_simulation = 0
spf = StreamParticlesStaticOutput(handler)
+ spf.n_ref = n_ref
spf.units["cm"] = sim_unit_to_cm
spf.units['1'] = 1.0
spf.units["unitary"] = 1.0
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -36,7 +36,7 @@
ParallelAnalysisInterface, parallel_splitter
from yt.utilities.lib.misc_utilities import \
pixelize_cylinder
-import yt.visualization._MPL
+import yt.visualization._MPL as _MPL
from .cartesian_fields import CartesianFieldInfo
from .cylindrical_fields import CylindricalFieldInfo, PolarFieldInfo
@@ -103,7 +103,7 @@
c2[...,0] = ((coord[...,0] - center[0])**2.0
+ (coord[...,1] - center[1])**2.0)**0.5
c2[...,1] = coord[...,2] # rzt
- c2[...,2] = np.arctans(coord[...,1] - center[1],
+ c2[...,2] = np.arctan2(coord[...,1] - center[1],
coord[...,0] - center[0])
return c2
@@ -145,7 +145,7 @@
data_source['py'], data_source['pdx'],
data_source['pdy'], data_source['pdz'],
data_source.center, data_source._inv_mat, indices,
- data_source[item], size[0], size[1], bounds).transpose()
+ data_source[field], size[0], size[1], bounds).transpose()
return buff
def convert_from_cartesian(self, coord):
@@ -258,7 +258,7 @@
@property
def period(self):
- return na.array([0.0, 0.0, 2.0*np.pi])
+ return np.array([0.0, 0.0, 2.0*np.pi])
class CylindricalCoordinateHandler(CoordinateHandler):
@@ -331,5 +331,5 @@
@property
def period(self):
- return na.array([0.0, 0.0, 2.0*np.pi])
+ return np.array([0.0, 0.0, 2.0*np.pi])
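The np.arctans -> np.arctan2 fix above repairs a call that would have raised AttributeError; note also that the two-argument arctan2 is what the geometry requires, since a one-argument arctan of y/x cannot distinguish quadrants. A quick demonstration:

    import numpy as np

    x, y = -1.0, -1.0
    print(np.arctan(y / x))   # 0.785..., lands in the first quadrant
    print(np.arctan2(y, x))   # -2.356..., the correct third quadrant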
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/cylindrical_fields.py
--- a/yt/geometry/cylindrical_fields.py
+++ b/yt/geometry/cylindrical_fields.py
@@ -35,6 +35,8 @@
NeedsDataField, \
NeedsProperty, \
NeedsParameter
+from yt.utilities.exceptions import \
+ YTCoordinateNotImplemented
CylindricalFieldInfo = FieldInfoContainer()
CylindricalFieldInfo.name = id(CylindricalFieldInfo)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/oct_container.pxd
--- a/yt/geometry/oct_container.pxd
+++ b/yt/geometry/oct_container.pxd
@@ -53,6 +53,7 @@
cdef OctAllocationContainer *cont
cdef OctAllocationContainer **domains
cdef Oct ****root_mesh
+ cdef oct_visitor_function *fill_func
cdef int partial_coverage
cdef int nn[3]
cdef np.float64_t DLE[3], DRE[3]
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/oct_container.pyx
--- a/yt/geometry/oct_container.pyx
+++ b/yt/geometry/oct_container.pyx
@@ -109,6 +109,7 @@
self.DLE[i] = domain_left_edge[i] #0
self.DRE[i] = domain_right_edge[i] #num_grid
self._initialize_root_mesh()
+ self.fill_func = oct_visitors.fill_file_indices_oind
def _initialize_root_mesh(self):
self.root_mesh = <Oct****> malloc(sizeof(void*) * self.nn[0])
@@ -597,7 +598,7 @@
p[2] = cell_inds.data
data.array = p
data.domain = domain_id
- self.visit_all_octs(selector, oct_visitors.fill_file_indices, &data)
+ self.visit_all_octs(selector, self.fill_func, &data)
return levels, cell_inds, file_inds
@cython.boundscheck(False)
@@ -657,6 +658,7 @@
for i in range(3):
self.DLE[i] = domain_left_edge[i] #0
self.DRE[i] = domain_right_edge[i] #num_grid
+ self.fill_func = oct_visitors.fill_file_indices_rind
cdef int get_root(self, int ind[3], Oct **o):
o[0] = NULL
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/oct_visitors.pxd
--- a/yt/geometry/oct_visitors.pxd
+++ b/yt/geometry/oct_visitors.pxd
@@ -60,7 +60,8 @@
cdef oct_visitor_function copy_array_i64
cdef oct_visitor_function identify_octs
cdef oct_visitor_function assign_domain_ind
-cdef oct_visitor_function fill_file_indices
+cdef oct_visitor_function fill_file_indices_oind
+cdef oct_visitor_function fill_file_indices_rind
cdef inline int cind(int i, int j, int k):
return (((i*2)+j)*2+k)
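The inline cind helper above flattens an (i, j, k) octant triple, each component 0 or 1, into a single child index in 0..7. A pure-Python rendering of the same arithmetic, for reference:

    def cind(i, j, k):
        # row-major flattening of a 2x2x2 octant: i is the most
        # significant bit, k the least
        return ((i * 2) + j) * 2 + k

    assert [cind(i, j, k)
            for i in (0, 1) for j in (0, 1) for k in (0, 1)] == list(range(8))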
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/oct_visitors.pyx
--- a/yt/geometry/oct_visitors.pyx
+++ b/yt/geometry/oct_visitors.pyx
@@ -152,7 +152,7 @@
o.domain_ind = data.global_index
data.index += 1
-cdef void fill_file_indices(Oct *o, OctVisitorData *data, np.uint8_t selected):
+cdef void fill_file_indices_oind(Oct *o, OctVisitorData *data, np.uint8_t selected):
# We fill these arrays, then inside the level filler we use these as
# indices as we fill a second array from the data.
if selected == 0: return
@@ -164,3 +164,16 @@
find_arr[data.index] = o.file_ind
cell_arr[data.index] = oind(data)
data.index +=1
+
+cdef void fill_file_indices_rind(Oct *o, OctVisitorData *data, np.uint8_t selected):
+ # We fill these arrays, then inside the level filler we use these as
+ # indices as we fill a second array from the data.
+ if selected == 0: return
+ cdef void **p = <void **> data.array
+ cdef np.uint8_t *level_arr = <np.uint8_t *> p[0]
+ cdef np.int64_t *find_arr = <np.int64_t *> p[1]
+ cdef np.uint8_t *cell_arr = <np.uint8_t *> p[2]
+ level_arr[data.index] = data.level
+ find_arr[data.index] = o.file_ind
+ cell_arr[data.index] = rind(data)
+ data.index +=1
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/particle_deposit.pxd
--- a/yt/geometry/particle_deposit.pxd
+++ b/yt/geometry/particle_deposit.pxd
@@ -63,6 +63,8 @@
# We assume each will allocate and define their own temporary storage
cdef public object nvals
cdef public int bad_indices
+ cdef int update_values
cdef void process(self, int dim[3], np.float64_t left_edge[3],
np.float64_t dds[3], np.int64_t offset,
- np.float64_t ppos[3], np.float64_t *fields)
+ np.float64_t ppos[3], np.float64_t *fields,
+ np.int64_t domain_ind)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -38,6 +38,7 @@
cdef class ParticleDepositOperation:
def __init__(self, nvals):
self.nvals = nvals
+ self.update_values = 0 # This is the default
def initialize(self, *args):
raise NotImplementedError
@@ -101,7 +102,10 @@
if offset < 0: continue
# Check that we found the oct ...
self.process(dims, oi.left_edge, oi.dds,
- offset, pos, field_vals)
+ offset, pos, field_vals, oct.domain_ind)
+ if self.update_values == 1:
+ for j in range(nf):
+ field_pointers[j][i] = field_vals[j]
@cython.boundscheck(False)
@cython.wraparound(False)
@@ -116,6 +120,7 @@
cdef np.ndarray[np.float64_t, ndim=1] tarr
field_pointers = <np.float64_t**> alloca(sizeof(np.float64_t *) * nf)
field_vals = <np.float64_t*>alloca(sizeof(np.float64_t) * nf)
+ cdef np.int64_t gid = getattr(gobj, "id", -1)
for i in range(nf):
tarr = fields[i]
field_pointers[i] = <np.float64_t *> tarr.data
@@ -131,11 +136,15 @@
field_vals[j] = field_pointers[j][i]
for j in range(3):
pos[j] = positions[i, j]
- self.process(dims, left_edge, dds, 0, pos, field_vals)
+ self.process(dims, left_edge, dds, 0, pos, field_vals, gid)
+ if self.update_values == 1:
+ for j in range(nf):
+ field_pointers[j][i] = field_vals[j]
cdef void process(self, int dim[3], np.float64_t left_edge[3],
np.float64_t dds[3], np.int64_t offset,
- np.float64_t ppos[3], np.float64_t *fields):
+ np.float64_t ppos[3], np.float64_t *fields,
+ np.int64_t domain_ind):
raise NotImplementedError
cdef class CountParticles(ParticleDepositOperation):
@@ -154,7 +163,8 @@
np.float64_t dds[3],
np.int64_t offset, # offset into IO field
np.float64_t ppos[3], # this particle's position
- np.float64_t *fields # any other fields we need
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
# here we do our thing; this is the kernel
cdef int ii[3], i
@@ -190,7 +200,8 @@
np.float64_t dds[3],
np.int64_t offset,
np.float64_t ppos[3],
- np.float64_t *fields
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
cdef int ii[3], half_len, ib0[3], ib1[3]
cdef int i, j, k
@@ -243,7 +254,8 @@
np.float64_t dds[3],
np.int64_t offset,
np.float64_t ppos[3],
- np.float64_t *fields
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
cdef int ii[3], i
for i in range(3):
@@ -289,7 +301,8 @@
np.float64_t dds[3],
np.int64_t offset,
np.float64_t ppos[3],
- np.float64_t *fields
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
cdef int ii[3], i, cell_index
cdef float k, mk, qk
@@ -331,7 +344,8 @@
np.float64_t dds[3],
np.int64_t offset, # offset into IO field
np.float64_t ppos[3], # this particle's position
- np.float64_t *fields # any other fields we need
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
cdef int i, j, k, ind[3], ii
@@ -375,14 +389,15 @@
self.ow = np.zeros(self.nvals, dtype='float64', order='F')
cdef np.ndarray warr = self.ow
self.w = <np.float64_t*> warr.data
-
+
@cython.cdivision(True)
cdef void process(self, int dim[3],
np.float64_t left_edge[3],
np.float64_t dds[3],
np.int64_t offset,
np.float64_t ppos[3],
- np.float64_t *fields
+ np.float64_t *fields,
+ np.int64_t domain_ind
):
cdef int ii[3], i
for i in range(3):
@@ -393,5 +408,27 @@
def finalize(self):
return self.owf / self.ow
-deposit_weighted_mean= WeightedMeanParticleField
+deposit_weighted_mean = WeightedMeanParticleField
+cdef class MeshIdentifier(ParticleDepositOperation):
+ # This is a tricky one!  It writes into the particle array the
+ # identifier of the oct or block (always zero for grids) in which
+ # each particle resides.
+ def initialize(self):
+ self.update_values = 1
+
+ @cython.cdivision(True)
+ cdef void process(self, int dim[3],
+ np.float64_t left_edge[3],
+ np.float64_t dds[3],
+ np.int64_t offset,
+ np.float64_t ppos[3],
+ np.float64_t *fields,
+ np.int64_t domain_ind
+ ):
+ fields[0] = domain_ind
+
+ def finalize(self):
+ return
+
+deposit_mesh_id = MeshIdentifier
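When update_values is set, the deposit loop copies field_vals back into the particle arrays after each process() call; that write-back is what lets MeshIdentifier hand an oct identifier back to every particle. At the user level this surfaces as a deposited "mesh_id" particle field; a sketch of the intended usage, mirroring the octree test added later in this changeset:

    import numpy as np
    from yt.frontends.stream.api import load_particles

    n = 32**3
    data = dict(("particle_position_%s" % ax, np.random.random(n))
                for ax in "xyz")
    pf = load_particles(data, 1.0,
                        bbox=np.array([[0.0, 1.0]] * 3), n_ref=64)

    dd = pf.h.all_data()
    mesh_id = dd["all", "mesh_id"]   # one oct identifier per particle
    counts = np.bincount(mesh_id.astype("int64"))
    assert counts.max() <= 64        # no oct holds more than n_ref particles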
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/particle_geometry_handler.py
--- a/yt/geometry/particle_geometry_handler.py
+++ b/yt/geometry/particle_geometry_handler.py
@@ -87,7 +87,7 @@
pf = self.parameter_file
self.oct_handler = ParticleOctreeContainer(
[1, 1, 1], pf.domain_left_edge, pf.domain_right_edge)
- self.oct_handler.n_ref = 64
+ self.oct_handler.n_ref = pf.n_ref
mylog.info("Allocating for %0.3e particles", self.total_particles)
# No more than 256^3 in the region finder.
N = min(len(self.data_files), 256)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -190,7 +190,7 @@
if cur.children == NULL or \
cur.children[cind(ind[0],ind[1],ind[2])] == NULL:
cur = self.refine_oct(cur, index, level)
- self.filter_particles(cur, data, p, level + 1)
+ self.filter_particles(cur, data, p, level)
else:
cur = cur.children[cind(ind[0],ind[1],ind[2])]
cur.file_ind += 1
@@ -215,7 +215,7 @@
o.children[cind(i,j,k)] = noct
o.file_ind = self.n_ref + 1
for i in range(3):
- ind[i] = (index >> ((ORDER_MAX - (level + 1))*3 + (2 - i))) & 1
+ ind[i] = (index >> ((ORDER_MAX - level)*3 + (2 - i))) & 1
noct = o.children[cind(ind[0],ind[1],ind[2])]
return noct
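The refine_oct change above fixes which 3-bit group of the Morton index is consulted when a particle is pushed down into a newly created child. In isolation the bit extraction reads as follows (ORDER_MAX is given an assumed value here):

    ORDER_MAX = 20  # assumed for this sketch

    def octant_at_level(index, level):
        # pull out the three interleaved bits (one per axis) that
        # select the child octant at the given refinement level
        return [(index >> ((ORDER_MAX - level) * 3 + (2 - i))) & 1
                for i in range(3)]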
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -3,6 +3,8 @@
from yt.geometry.particle_oct_container import ParticleOctreeContainer
from yt.geometry.oct_container import _ORDER_MAX
from yt.utilities.lib.geometry_utils import get_morton_indices
+from yt.frontends.stream.api import load_particles
+import yt.data_objects.api
import time, os
NPART = 32**3
@@ -35,7 +37,27 @@
# This visits every cell -- including those covered by octs.
#for dom in range(ndom):
# level_count += octree.count_levels(total_count.size-1, dom, mask)
- yield assert_equal, total_count, [1, 8, 64, 104, 184, 480, 1680, 1480]
+ yield assert_equal, total_count, [1, 8, 64, 64, 256, 536, 1856, 1672]
+
+def test_particle_octree_counts():
+ np.random.seed(int(0x4d3d3d3))
+ # Eight times as many!
+ pos = []
+ data = {}
+ bbox = []
+ for i, ax in enumerate('xyz'):
+ DW = DRE[i] - DLE[i]
+ LE = DLE[i]
+ data["particle_position_%s" % ax] = \
+ np.random.normal(0.5, scale=0.05, size=(NPART*8)) * DW + LE
+ bbox.append( [DLE[i], DRE[i]] )
+ bbox = np.array(bbox)
+ for n_ref in [16, 32, 64, 512, 1024]:
+ pf = load_particles(data, 1.0, bbox = bbox, n_ref = n_ref)
+ dd = pf.h.all_data()
+ bi = dd["all","mesh_id"]
+ v = np.bincount(bi.astype("int64"))
+ yield assert_equal, v.max() <= n_ref, True
if __name__=="__main__":
for i in test_add_particles_random():
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/amr_kdtree/amr_kdtree.py
--- a/yt/utilities/amr_kdtree/amr_kdtree.py
+++ b/yt/utilities/amr_kdtree/amr_kdtree.py
@@ -54,14 +54,14 @@
class Tree(object):
def __init__(self, pf, comm_rank=0, comm_size=1, left=None, right=None,
- min_level=None, max_level=None, source=None):
+ min_level=None, max_level=None, data_source=None):
self.pf = pf
self._id_offset = self.pf.h.grids[0]._id_offset
- if source is None:
- source = pf.h.all_data()
- self.source = source
+ if data_source is None:
+ data_source = pf.h.all_data()
+ self.data_source = data_source
if left is None:
left = np.array([-np.inf]*3)
if right is None:
@@ -87,8 +87,8 @@
def build(self):
lvl_range = range(self.min_level, self.max_level+1)
for lvl in lvl_range:
- #grids = self.source.select_grids(lvl)
- grids = np.array([b for b, mask in self.source.blocks if b.Level == lvl])
+ #grids = self.data_source.select_grids(lvl)
+ grids = np.array([b for b, mask in self.data_source.blocks if b.Level == lvl])
if len(grids) == 0: continue
self.add_grids(grids)
@@ -141,7 +141,7 @@
no_ghost = True
def __init__(self, pf, min_level=None, max_level=None,
- source=None):
+ data_source=None):
ParallelAnalysisInterface.__init__(self)
@@ -158,14 +158,14 @@
except AttributeError:
self._id_offset = 0
- if source is None:
- source = self.pf.h.all_data()
- self.source = source
+ if data_source is None:
+ data_source = self.pf.h.all_data()
+ self.data_source = data_source
mylog.debug('Building AMRKDTree')
self.tree = Tree(pf, self.comm.rank, self.comm.size,
min_level=min_level, max_level=max_level,
- source=source)
+ data_source=data_source)
def set_fields(self, fields, log_fields, no_ghost):
self.fields = fields
@@ -257,17 +257,23 @@
else:
dds = []
for i, field in enumerate(self.fields):
- vcd = grid.get_vertex_centered_data(field, smoothed=True,no_ghost=self.no_ghost).astype('float64')
+ vcd = grid.get_vertex_centered_data(field, smoothed=True, no_ghost=self.no_ghost).astype('float64')
if self.log_fields[i]: vcd = np.log10(vcd)
dds.append(vcd)
self.current_saved_grids.append(grid)
self.current_vcds.append(dds)
+ if self.data_source.selector is None:
+ mask = np.ones(dims, dtype='uint8')
+ else:
+ mask = self.data_source.selector.fill_mask(grid)[li[0]:ri[0], li[1]:ri[1], li[2]:ri[2] ].astype('uint8')
+
data = [d[li[0]:ri[0]+1,
li[1]:ri[1]+1,
li[2]:ri[2]+1].copy() for d in dds]
brick = PartitionedGrid(grid.id, data,
+ mask,
nle.copy(),
nre.copy(),
dims.astype('int64'))
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/cosmology.py
--- a/yt/utilities/cosmology.py
+++ b/yt/utilities/cosmology.py
@@ -61,7 +61,7 @@
self.HubbleDistance()))
elif (self.OmegaCurvatureNow < 0):
return (self.HubbleDistance() / np.sqrt(np.fabs(self.OmegaCurvatureNow)) *
- sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) *
+ np.sin(np.sqrt(np.fabs(self.OmegaCurvatureNow)) *
self.ComovingRadialDistance(z_i,z_f) / self.HubbleDistance()))
else:
return self.ComovingRadialDistance(z_i,z_f)
@@ -73,7 +73,7 @@
np.sqrt(1 + self.OmegaCurvatureNow *
sqr(self.ComovingTransverseDistance(z_i,z_f) /
self.HubbleDistance())) -
- anp.sinh(np.fabs(self.OmegaCurvatureNow) *
+ np.sinh(np.fabs(self.OmegaCurvatureNow) *
self.ComovingTransverseDistance(z_i,z_f) /
self.HubbleDistance()) / np.sqrt(self.OmegaCurvatureNow)) / 1e9)
elif (self.OmegaCurvatureNow < 0):
@@ -83,7 +83,7 @@
np.sqrt(1 + self.OmegaCurvatureNow *
sqr(self.ComovingTransverseDistance(z_i,z_f) /
self.HubbleDistance())) -
- asin(np.fabs(self.OmegaCurvatureNow) *
+ np.arcsin(np.fabs(self.OmegaCurvatureNow) *
self.ComovingTransverseDistance(z_i,z_f) /
self.HubbleDistance()) /
np.sqrt(np.fabs(self.OmegaCurvatureNow))) / 1e9)
@@ -269,7 +269,7 @@
# 3) For OmegaMatterNow > 1 and OmegaLambdaNow == 0, use sin/cos.
if ((self.OmegaMatterNow > 1) and (self.OmegaLambdaNow == 0)):
- eta = np.acos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
+ eta = np.arccos(1 - 2*(1-self.OmegaMatterNow)/self.OmegaMatterNow/(1+z))
TimeHubble0 = self.OmegaMatterNow/(2*np.power(1.0-self.OmegaMatterNow, 1.5))*\
(eta - np.sin(eta))
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -109,7 +109,7 @@
% (self.display_name, self.field_name) + self.mathtext_error
class YTCannotParseUnitDisplayName(YTException):
- def __init__(self, field_name, display_unit, mathtext_error):
+ def __init__(self, field_name, unit_name, mathtext_error):
self.field_name = field_name
self.unit_name = unit_name
self.mathtext_error = mathtext_error
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/flagging_methods.py
--- a/yt/utilities/flagging_methods.py
+++ b/yt/utilities/flagging_methods.py
@@ -162,6 +162,9 @@
# Note that sd is offset by one
if sd[i-1] * sd[i] < 0:
strength = np.abs(sd[i-1] - sd[i])
+ # TODO: this differs from what I could find in ENZO, which uses
+ # |center - i| < |center - zero_cross| instead; additionally,
+ # zero_cross is undefined on the first pass.
if strength > zero_strength or \
(strength == zero_strength and np.abs(center - i) < np.abs(zero_cross -i )):
zero_strength = strength
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/fortran_utils.py
--- a/yt/utilities/fortran_utils.py
+++ b/yt/utilities/fortran_utils.py
@@ -121,7 +121,8 @@
vec_fmt = "%s%s" % (endian, d)
vec_size = struct.calcsize(vec_fmt)
if vec_len % vec_size != 0:
- print "fmt = '%s' ; length = %s ; size= %s" % (fmt, length, size)
+ print("fmt = '%s' ; length = %s ; size= %s"
+ % (vec_fmt, vec_len, vec_size))
raise RuntimeError
vec_num = vec_len / vec_size
if isinstance(f, file): # Needs to be explicitly a file
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/lib/grid_traversal.pxd
--- a/yt/utilities/lib/grid_traversal.pxd
+++ b/yt/utilities/lib/grid_traversal.pxd
@@ -30,6 +30,8 @@
cdef struct VolumeContainer:
int n_fields
np.float64_t **data
+ # The mask has dimensions one fewer in each direction than the data
+ np.uint8_t *mask
np.float64_t left_edge[3]
np.float64_t right_edge[3]
np.float64_t dds[3]
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/lib/grid_traversal.pyx
--- a/yt/utilities/lib/grid_traversal.pyx
+++ b/yt/utilities/lib/grid_traversal.pyx
@@ -60,6 +60,7 @@
cdef class PartitionedGrid:
cdef public object my_data
+ cdef public object source_mask
cdef public object LeftEdge
cdef public object RightEdge
cdef public int parent_grid_id
@@ -74,12 +75,14 @@
@cython.cdivision(True)
def __cinit__(self,
int parent_grid_id, data,
+ mask,
np.ndarray[np.float64_t, ndim=1] left_edge,
np.ndarray[np.float64_t, ndim=1] right_edge,
np.ndarray[np.int64_t, ndim=1] dims,
star_kdtree_container star_tree = None):
# The data is likely brought in via a slice, so we copy it
cdef np.ndarray[np.float64_t, ndim=3] tdata
+ cdef np.ndarray[np.uint8_t, ndim=3] mask_data
self.container = NULL
self.parent_grid_id = parent_grid_id
self.LeftEdge = left_edge
@@ -96,10 +99,13 @@
c.dds[i] = (c.right_edge[i] - c.left_edge[i])/dims[i]
c.idds[i] = 1.0/c.dds[i]
self.my_data = data
+ self.source_mask = mask
+ mask_data = mask
c.data = <np.float64_t **> malloc(sizeof(np.float64_t*) * n_fields)
for i in range(n_fields):
tdata = data[i]
c.data[i] = <np.float64_t *> tdata.data
+ c.mask = <np.uint8_t *> mask_data.data
if star_tree is None:
self.star_list = NULL
else:
@@ -503,6 +509,10 @@
# we assume this has vertex-centered data.
cdef int offset = index[0] * (vc.dims[1] + 1) * (vc.dims[2] + 1) \
+ index[1] * (vc.dims[2] + 1) + index[2]
+ cdef int cell_offset = index[0] * (vc.dims[1]) * (vc.dims[2]) \
+ + index[1] * (vc.dims[2]) + index[2]
+ if vc.mask[cell_offset] != 1:
+ return
cdef np.float64_t slopes[6], dp[3], ds[3]
cdef np.float64_t dt = (exit_t - enter_t) / vri.n_samples
cdef np.float64_t dvs[6]
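Because the PartitionedGrid data is vertex-centered while the new mask is cell-centered, the sampler computes two different flat offsets from the same (i, j, k) index, as the hunk above shows. The arithmetic, written out as a standalone sketch:

    def offsets(index, dims):
        i, j, k = index
        ny, nz = dims[1], dims[2]
        # vertex-centered data carries one extra sample along each axis
        vertex = i * (ny + 1) * (nz + 1) + j * (nz + 1) + k
        # the cell-centered mask does not
        cell = i * ny * nz + j * nz + k
        return vertex, cell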
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/parallel_tools/controller_system.py
--- a/yt/utilities/parallel_tools/controller_system.py
+++ b/yt/utilities/parallel_tools/controller_system.py
@@ -27,6 +27,8 @@
from .parallel_analysis_interface import MPI
except ImportError:
pass
+from .parallel_analysis_interface import \
+ ProcessorPool
from contextlib import contextmanager
from abc import ABCMeta, abstractmethod, abstractproperty
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/parallel_tools/io_runner.py
--- a/yt/utilities/parallel_tools/io_runner.py
+++ b/yt/utilities/parallel_tools/io_runner.py
@@ -24,7 +24,10 @@
"""
import os
-from .parallel_analysis_interface import ProcessorPool
+import numpy as np
+from yt.utilities.logger import ytLogger as mylog
+from .parallel_analysis_interface import \
+ ProcessorPool, parallel_objects
from yt.utilities.io_handler import BaseIOHandler
from contextlib import contextmanager
import time
@@ -168,6 +171,7 @@
pf.h.io = original_io
def io_nodes(fn, n_io, n_work, func, *args, **kwargs):
+ from yt.mods import load
pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")])
rv = None
if wg.name == "work":
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/parallel_tools/parallel_analysis_interface.py
--- a/yt/utilities/parallel_tools/parallel_analysis_interface.py
+++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py
@@ -1115,8 +1115,9 @@
"""
LE, RE = left_edge[:], right_edge[:]
if not self._distributed:
+ raise NotImplementedError
return LE, RE, re
-
+
cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3)
mi = self.comm.rank % (self.comm.size / rank_ratio)
cx, cy, cz = np.unravel_index(mi, cc)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/parameter_file_storage.py
--- a/yt/utilities/parameter_file_storage.py
+++ b/yt/utilities/parameter_file_storage.py
@@ -221,7 +221,8 @@
def __init__(self, path = None):
if path is None:
path = ytcfg.get("yt", "enzo_db")
- if len(path) == 0: raise Runtime
+ if len(path) == 0:
+ raise RuntimeError
import sqlite3
self.conn = sqlite3.connect(path)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/pykdtree.py
--- a/yt/utilities/pykdtree.py
+++ b/yt/utilities/pykdtree.py
@@ -2,6 +2,7 @@
# Released under the scipy license
import sys
import numpy as np
+import scipy
from heapq import heappush, heappop
def minkowski_distance_p(x,y,p=2):
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/utilities/rpdb.py
--- a/yt/utilities/rpdb.py
+++ b/yt/utilities/rpdb.py
@@ -25,6 +25,7 @@
import cmd, pdb, cStringIO, xmlrpclib, socket, sys
import traceback
+import signal
from SimpleXMLRPCServer import SimpleXMLRPCServer
from yt.config import ytcfg
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/visualization/plot_window.py
--- a/yt/visualization/plot_window.py
+++ b/yt/visualization/plot_window.py
@@ -1100,12 +1100,15 @@
def _send_zmq(self):
try:
- # pre-IPython v0.14
+ # pre-IPython v1.0
from IPython.zmq.pylab.backend_inline import send_figure as display
except ImportError:
- # IPython v0.14+
+ # IPython v1.0+
from IPython.core.display import display
for k, v in sorted(self.plots.iteritems()):
+ # Due to a quirk in the matplotlib API, we need to create
+ # a dummy canvas variable here that is never used.
+ canvas = FigureCanvasAgg(v.figure) # NOQA
display(v.figure)
def show(self):
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -121,6 +121,10 @@
accuracy/smoothness in resulting image. The effects are
less notable when the transfer function is smooth and
broad. Default: True
+ data_source: data container, optional
+ Optionally specify an arbitrary data source for the volume rendering.
+ All cells not included in the data source will be ignored during ray
+ casting. By default this is set to pf.h.all_data().
Examples
--------
@@ -164,7 +168,7 @@
log_fields = None,
sub_samples = 5, pf = None,
min_level=None, max_level=None, no_ghost=True,
- source=None,
+ data_source=None,
use_light=False):
ParallelAnalysisInterface.__init__(self)
if pf is not None: self.pf = pf
@@ -196,13 +200,13 @@
if self.no_ghost:
mylog.info('Warning: no_ghost is currently True (default). This may lead to artifacts at grid boundaries.')
- if source is None:
- source = self.pf.h.all_data()
- self.source = source
+ if data_source is None:
+ data_source = self.pf.h.all_data()
+ self.data_source = data_source
if volume is None:
volume = AMRKDTree(self.pf, min_level=min_level,
- max_level=max_level, source=self.source)
+ max_level=max_level, data_source=self.data_source)
self.volume = volume
def _setup_box_properties(self, width, center, unit_vectors):
@@ -1125,6 +1129,8 @@
sub_samples = 5, log_fields = None, volume = None,
pf = None, use_kd=True, no_ghost=False, use_light=False,
inner_radius = 10):
+ mylog.error('I am sorry, HEALpix Camera does not work yet in 3.0')
+ raise NotImplementedError
ParallelAnalysisInterface.__init__(self)
if pf is not None: self.pf = pf
self.center = np.array(center, dtype='float64')
@@ -1155,8 +1161,8 @@
self.light_dir = None
self.light_rgba = None
if volume is None:
- volume = AMRKDTree(self.pf, fields=self.fields, no_ghost=no_ghost,
- log_fields=log_fields)
+ volume = AMRKDTree(self.pf, min_level=min_level,
+ max_level=max_level, data_source=self.data_source)
self.use_kd = isinstance(volume, AMRKDTree)
self.volume = volume
@@ -1963,7 +1969,7 @@
yield self.snapshot()
def allsky_projection(pf, center, radius, nside, field, weight = None,
- inner_radius = 10, rotation = None, source = None):
+ inner_radius = 10, rotation = None, data_source = None):
r"""Project through a parameter file, through an allsky-method
decomposition from HEALpix, and return the image plane.
@@ -1998,7 +2004,7 @@
If supplied, the vectors will be rotated by this. You can construct
this by, for instance, calling np.array([v1,v2,v3]) where those are the
three reference planes of an orthogonal frame (see ortho_find).
- source : data container, default None
+ data_source : data container, default None
If this is supplied, this gives the data source from which the all sky
projection pulls its data from.
@@ -2044,16 +2050,16 @@
positions += inner_radius * dx * vs
vs *= radius
uv = np.ones(3, dtype='float64')
- if source is not None:
- grids = source._grids
+ if data_source is not None:
+ grids = data_source._grids
else:
grids = pf.h.sphere(center, radius)._grids
sampler = ProjectionSampler(positions, vs, center, (0.0, 0.0, 0.0, 0.0),
image, uv, uv, np.zeros(3, dtype='float64'))
pb = get_pbar("Sampling ", len(grids))
for i,grid in enumerate(grids):
- if source is not None:
- data = [grid[field] * source._get_cut_mask(grid) * \
+ if data_source is not None:
+ data = [grid[field] * data_source._get_cut_mask(grid) * \
grid.child_mask.astype('float64')
for field in fields]
else:
@@ -2197,12 +2203,13 @@
np.minimum(mi, this_point, mi)
np.maximum(ma, this_point, ma)
# Now we have a bounding box.
- source = pf.h.region(self.center, mi, ma)
+ data_source = pf.h.region(self.center, mi, ma)
- for i, (grid, mask) in enumerate(source.blocks):
+ for i, (grid, mask) in enumerate(data_source.blocks):
data = [(grid[field] * mask).astype("float64") for field in fields]
pg = PartitionedGrid(
grid.id, data,
+ mask.astype('uint8'),
grid.LeftEdge, grid.RightEdge, grid.ActiveDimensions.astype("int64"))
grid.clear_data()
sampler(pg, num_threads = num_threads)
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/visualization/volume_rendering/setup.py
--- a/yt/visualization/volume_rendering/setup.py
+++ b/yt/visualization/volume_rendering/setup.py
@@ -12,4 +12,5 @@
config = Configuration('volume_rendering', parent_package, top_path)
config.make_config_py() # installs __config__.py
#config.make_svn_version_py()
+ config.add_subpackage('tests')
return config
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/visualization/volume_rendering/tests/test_vr_cameras.py
--- /dev/null
+++ b/yt/visualization/volume_rendering/tests/test_vr_cameras.py
@@ -0,0 +1,163 @@
+"""
+Test for Volume Rendering Cameras, and their movement.
+
+Author: Samuel Skillman <samskillman at gmail.com>
+Homepage: http://yt-project.org/
+License:
+ Copyright (C) 2013 Samuel Skillman. All Rights Reserved.
+
+ This file is part of yt.
+
+ yt is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+import os
+import os.path
+import tempfile
+import shutil
+from yt.testing import \
+ fake_random_pf
+import numpy as np
+from yt.mods import ColorTransferFunction, ProjectionTransferFunction
+from yt.visualization.volume_rendering.api import \
+ PerspectiveCamera, StereoPairCamera, InteractiveCamera, ProjectionCamera
+from yt.visualization.tests.test_plotwindow import assert_fname
+from unittest import TestCase
+
+# This toggles using a temporary directory. Turn off to examine images.
+use_tmpdir = True
+
+
+def setup():
+ """Test specific setup."""
+ from yt.config import ytcfg
+ ytcfg["yt", "__withintesting"] = "True"
+
+
+class CameraTest(TestCase):
+ def setUp(self):
+ if use_tmpdir:
+ self.curdir = os.getcwd()
+ # Perform I/O in safe place instead of yt main dir
+ self.tmpdir = tempfile.mkdtemp()
+ os.chdir(self.tmpdir)
+ else:
+ self.curdir, self.tmpdir = None, None
+
+ self.pf = fake_random_pf(64)
+ self.c = self.pf.domain_center
+ self.L = np.array([0.5, 0.5, 0.5])
+ self.W = 1.5*self.pf.domain_width
+ self.N = 64
+ self.field = "Density"
+
+ def tearDown(self):
+ if use_tmpdir:
+ os.chdir(self.curdir)
+ shutil.rmtree(self.tmpdir)
+
+ def setup_transfer_function(self, camera_type):
+ if camera_type in ['perspective', 'camera',
+ 'stereopair', 'interactive']:
+ mi, ma = self.pf.h.all_data().quantities['Extrema']('Density')[0]
+ tf = ColorTransferFunction((mi-1., ma+1.), grey_opacity=True)
+ tf.map_to_colormap(mi, ma, scale=10., colormap='RdBu_r')
+ return tf
+ elif camera_type in ['healpix']:
+ return ProjectionTransferFunction()
+ else:
+ pass
+
+ def test_camera(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+ cam = self.pf.h.camera(self.c, self.L, self.W, self.N,
+ transfer_function=tf)
+ cam.snapshot('camera.png')
+ assert_fname('camera.png')
+
+ def test_data_source_camera(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+ data_source = pf.h.sphere(pf.domain_center, pf.domain_width[0]*0.5)
+
+ cam = pf.h.camera(self.c, self.L, self.W, self.N,
+ transfer_function=tf, data_source=data_source)
+ cam.snapshot('data_source_camera.png')
+ assert_fname('data_source_camera.png')
+
+ def test_perspective_camera(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+
+ cam = PerspectiveCamera(self.c, self.L, self.W, self.N, pf=pf,
+ transfer_function=tf)
+ cam.snapshot('perspective.png')
+ assert_fname('perspective.png')
+
+ def test_interactive_camera(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+
+ cam = InteractiveCamera(self.c, self.L, self.W, self.N, pf=pf,
+ transfer_function=tf)
+ # Can't take a snapshot here since IC uses pylab.
+
+ def test_projection_camera(self):
+ pf = self.pf
+
+ cam = ProjectionCamera(self.c, self.L, self.W, self.N, pf=pf,
+ field='Density')
+ cam.snapshot('projection.png')
+ assert_fname('projection.png')
+
+ def test_stereo_camera(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+
+ cam = pf.h.camera(self.c, self.L, self.W, self.N, transfer_function=tf)
+ stereo_cam = StereoPairCamera(cam)
+ # Take image
+ cam1, cam2 = stereo_cam.split()
+ cam1.snapshot(fn='stereo1.png')
+ cam2.snapshot(fn='stereo2.png')
+ assert_fname('stereo1.png')
+ assert_fname('stereo2.png')
+
+ def test_camera_movement(self):
+ pf = self.pf
+ tf = self.setup_transfer_function('camera')
+
+ cam = pf.h.camera(self.c, self.L, self.W, self.N, transfer_function=tf)
+ cam.zoom(0.5)
+ for snap in cam.zoomin(2.0, 3):
+ snap
+ for snap in cam.move_to(np.array(self.c) + 0.1, 3,
+ final_width=None, exponential=False):
+ snap
+ for snap in cam.move_to(np.array(self.c) - 0.1, 3,
+ final_width=2.0*self.W, exponential=False):
+ snap
+ for snap in cam.move_to(np.array(self.c), 3,
+ final_width=1.0*self.W, exponential=True):
+ snap
+ cam.rotate(np.pi/10)
+ cam.pitch(np.pi/10)
+ cam.yaw(np.pi/10)
+ cam.roll(np.pi/10)
+ for snap in cam.rotation(np.pi, 3, rot_vector=None):
+ snap
+ for snap in cam.rotation(np.pi, 3, rot_vector=np.random.random(3)):
+ snap
+ cam.snapshot('final.png')
+ assert_fname('final.png')
diff -r b6d6b9082ebb74b645ee43fdddd99b866f448459 -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -636,24 +636,59 @@
v, w, (r,g,b,alpha)))
def map_to_colormap(self, mi, ma, scale=1.0, colormap="gist_stern",
- scale_func=None):
+ scale_func=None):
+ r"""Map a range of values to a full colormap.
+
+ Given a minimum and maximum value in the TransferFunction, map a full
+ colormap over that range at an alpha level of `scale`.
+ Optionally specify a scale_func function that modifies the alpha as
+ a function of the transfer function value.
+
+ Parameters
+ ----------
+ mi : float
+ The start of the TransferFunction to map the colormap
+ ma : float
+ The end of the TransferFunction to map the colormap
+ scale: float, optional
+ The alpha value to be used for the height of the transfer function.
+ Larger values will be more opaque.
+ colormap : string, optional
+ An acceptable colormap. See either yt.visualization.color_maps or
+ http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps .
+ scale_func: function(value, minval, maxval), optional
+ A user-defined function that can be used to scale the alpha channel
+ as a function of the TransferFunction field values.  The function
+ maps values into the range [minval, maxval].
+
+ Examples
+ --------
+
+ >>> def linramp(vals, minval, maxval):
+ ... return (vals - vals.min())/(vals.max() - vals.min())
+ >>> tf = ColorTransferFunction( (-10.0, -5.0) )
+ >>> tf.map_to_colormap(-8.0, -6.0, scale=10.0, colormap='algae')
+ >>> tf.map_to_colormap(-6.0, -5.0, scale=10.0, colormap='algae',
+ ... scale_func = linramp)
+ """
+
rel0 = int(self.nbins*(mi - self.x_bounds[0])/(self.x_bounds[1] -
- self.x_bounds[0]))
+ self.x_bounds[0]))
rel1 = int(self.nbins*(ma - self.x_bounds[0])/(self.x_bounds[1] -
- self.x_bounds[0]))
+ self.x_bounds[0]))
rel0 = max(rel0, 0)
rel1 = min(rel1, self.nbins-1)
- tomap = np.linspace(0.,1.,num=rel1-rel0)
+ tomap = np.linspace(0., 1., num=rel1-rel0)
cmap = get_cmap(colormap)
cc = cmap(tomap)
if scale_func is None:
scale_mult = 1.0
else:
- scale_mult = scale_func(tomap,0.0,1.0)
- self.red.y[rel0:rel1] = cc[:,0]*scale_mult
- self.green.y[rel0:rel1]= cc[:,1]*scale_mult
- self.blue.y[rel0:rel1] = cc[:,2]*scale_mult
- self.alpha.y[rel0:rel1]= scale*cc[:,3]*scale_mult
+ scale_mult = scale_func(tomap, 0.0, 1.0)
+ self.red.y[rel0:rel1] = cc[:, 0]*scale_mult
+ self.green.y[rel0:rel1] = cc[:, 1]*scale_mult
+ self.blue.y[rel0:rel1] = cc[:, 2]*scale_mult
+ self.alpha.y[rel0:rel1] = scale*cc[:, 3]*scale_mult
def add_layers(self, N, w=None, mi=None, ma=None, alpha = None,
colormap="gist_stern", col_bounds = None):
https://bitbucket.org/yt_analysis/yt-3.0/commits/3ac193628a44/
Changeset: 3ac193628a44
Branch: yt-3.0
User: juxtaposicion
Date: 2013-08-22 07:46:36
Summary: merge
Affected #: 68 files
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 .hgchurn
--- a/.hgchurn
+++ b/.hgchurn
@@ -4,8 +4,16 @@
juxtaposicion at gmail.com = cemoody at ucsc.edu
chummels at gmail.com = chummels at astro.columbia.edu
jwise at astro.princeton.edu = jwise at physics.gatech.edu
-atmyers = atmyers at berkeley.edu
sam.skillman at gmail.com = samskillman at gmail.com
casey at thestarkeffect.com = caseywstark at gmail.com
chiffre = chiffre at posteo.de
Christian Karch = chiffre at posteo.de
+atmyers at berkeley.edu = atmyers2 at gmail.com
+atmyers = atmyers2 at gmail.com
+drudd = drudd at uchicago.edu
+awetzel = andrew.wetzel at yale.edu
+David Collins (dcollins4096 at gmail.com) = dcollins4096 at gmail.com
+dcollins at physics.ucsd.edu = dcollins4096 at gmail.com
+tabel = tabel at slac.stanford.edu
+sername=kayleanelson = kaylea.nelson at yale.edu
+kayleanelson = kaylea.nelson at yale.edu
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,4 @@
0000000000000000000000000000000000000000 hop callback
a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,4 @@
include distribute_setup.py README* CREDITS FUNDING LICENSE.txt
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
recursive-include yt *.pyx *.pxd *.hh *.h README*
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
\ No newline at end of file
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
function do_setup_py
{
[ -e $1/done ] && return
- echo "Installing $1 (arguments: '$*')"
- [ ! -e $1/extracted ] && tar xfz $1.tar.gz
- touch $1/extracted
- cd $1
- if [ ! -z `echo $1 | grep h5py` ]
+ LIB=$1
+ shift
+ if [ -z "$@" ]
+ then
+ echo "Installing $LIB"
+ else
+ echo "Installing $LIB (arguments: '$@')"
+ fi
+ [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+ touch $LIB/extracted
+ cd $LIB
+ if [ ! -z `echo $LIB | grep h5py` ]
then
shift
( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
function get_ytproject
{
+ [ -e $1 ] && return
echo "Downloading $1 from yt-project.org"
- [ -e $1 ] && return
${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
}
@@ -551,67 +558,93 @@
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
# Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1 Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299 Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3 freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3 h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554 libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208 matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2 mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8 numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865 zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83 pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1 tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397 python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4 scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
- if [ ! -e bzip2-1.0.6/done ]
+ if [ ! -e $BZLIB/done ]
then
- [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+ [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
echo "Installing BZLIB"
- cd bzip2-1.0.6
+ cd $BZLIB
if [ `uname` = "Darwin" ]
then
if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
if [ $INST_ZLIB -eq 1 ]
then
- if [ ! -e zlib-1.2.7/done ]
+ if [ ! -e $ZLIB/done ]
then
- [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+ [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
echo "Installing ZLIB"
- cd zlib-1.2.7
+ cd $ZLIB
( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
if [ $INST_PNG -eq 1 ]
then
- if [ ! -e libpng-1.6.1/done ]
+ if [ ! -e $PNG/done ]
then
- [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+ [ ! -e $PNG ] && tar xfz $PNG.tar.gz
echo "Installing PNG"
- cd libpng-1.6.1
+ cd $PNG
( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
if [ $INST_FTYPE -eq 1 ]
then
- if [ ! -e freetype-2.4.11/done ]
+ if [ ! -e $FREETYPE_VER/done ]
then
- [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+ [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
echo "Installing FreeType2"
- cd freetype-2.4.11
+ cd $FREETYPE_VER
( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
@@ -688,11 +722,11 @@
if [ -z "$HDF5_DIR" ]
then
- if [ ! -e hdf5-1.8.9/done ]
+ if [ ! -e $HDF5/done ]
then
- [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+ [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
echo "Installing HDF5"
- cd hdf5-1.8.9
+ cd $HDF5
( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
if [ $INST_SQLITE3 -eq 1 ]
then
- if [ ! -e sqlite-autoconf-3071601/done ]
+ if [ ! -e $SQLITE/done ]
then
- [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+ [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
echo "Installing SQLite3"
- cd sqlite-autoconf-3071601
+ cd $SQLITE
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
fi
fi
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
then
echo "Installing Python. This may take a while, but don't worry. yt loves you."
- [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
- cd Python-2.7.4
+ [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+ cd $PYTHON
( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
if [ $INST_HG -eq 1 ]
then
- do_setup_py mercurial-2.5.4
+ do_setup_py $MERCURIAL
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
if [ $INST_SCIPY -eq 0 ]
then
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+ do_setup_py $NUMPY ${NUMPY_ARGS}
else
- if [ ! -e scipy-0.11.0/done ]
+ if [ ! -e $SCIPY/done ]
then
if [ ! -e BLAS/done ]
then
@@ -798,17 +832,17 @@
echo "Building BLAS"
cd BLAS
gfortran -O2 -fPIC -fno-second-underscore -c *.f
- ar r libfblas.a *.o 1>> ${LOG_FILE}
+ ar r libfblas.a *.o &>> ${LOG_FILE}
ranlib libfblas.a 1>> ${LOG_FILE}
rm -rf *.o
touch done
cd ..
fi
- if [ ! -e lapack-3.4.2/done ]
+ if [ ! -e $LAPACK/done ]
then
- tar xfz lapack-3.4.2.tar.gz
+ tar xfz $LAPACK.tar.gz
echo "Building LAPACK"
- cd lapack-3.4.2/
+ cd $LAPACK/
cp INSTALL/make.inc.gfortran make.inc
make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
touch done
@@ -816,9 +850,9 @@
fi
fi
export BLAS=$PWD/BLAS/libfblas.a
- export LAPACK=$PWD/lapack-3.4.2/liblapack.a
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
- do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+ export LAPACK=$PWD/$LAPACK/liblapack.a
+ do_setup_py $NUMPY ${NUMPY_ARGS}
+ do_setup_py $SCIPY ${NUMPY_ARGS}
fi
if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
echo "Setting CFLAGS ${CFLAGS}"
fi
# Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
if [ -n "${OLD_LDFLAGS}" ]
then
export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
# Now we do our IPython installation, which has two optional dependencies.
if [ $INST_0MQ -eq 1 ]
then
- if [ ! -e zeromq-3.2.2/done ]
+ if [ ! -e $ZEROMQ/done ]
then
- [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+ [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
echo "Installing ZeroMQ"
- cd zeromq-3.2.2
+ cd $ZEROMQ
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
- do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
- do_setup_py tornado-3.0
+ do_setup_py $PYZMQ --zmq=${DEST_DIR}
+ do_setup_py $TORNADO
fi
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
# Now we build Rockstar and set its environment variable.
if [ $INST_ROCKSTAR -eq 1 ]
then
if [ ! -e Rockstar/done ]
then
- [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+ [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
echo "Building Rockstar"
cd Rockstar
( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
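
The echo '<digest>  <tarball>' > <tarball>.sha512 lines above record the
expected SHA-512 digest for each source tarball, which can be checked against
the downloaded files before building. A minimal Python sketch of such a check
(an illustration of the idea, not the install script's own shell logic; the
file names in the usage comment are placeholders):

    import hashlib

    def verify_sha512(tarball, sha512_file):
        # The .sha512 file holds "<hex digest>  <file name>".
        expected = open(sha512_file).read().split()[0]
        h = hashlib.sha512()
        with open(tarball, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                h.update(chunk)
        return h.hexdigest() == expected

    # Hypothetical usage:
    # verify_sha512('zlib-1.2.8.tar.gz', 'zlib-1.2.8.tar.gz.sha512')
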
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 scripts/iyt
--- a/scripts/iyt
+++ b/scripts/iyt
@@ -1,6 +1,6 @@
#!python
import os, re
-from distutils import version
+from distutils.version import LooseVersion
from yt.mods import *
from yt.data_objects.data_containers import YTDataContainer
namespace = locals().copy()
@@ -23,10 +23,12 @@
code.interact(doc, None, namespace)
sys.exit()
-if version.LooseVersion(IPython.__version__) <= version.LooseVersion('0.10'):
+if LooseVersion(IPython.__version__) <= LooseVersion('0.10'):
api_version = '0.10'
+elif LooseVersion(IPython.__version__) <= LooseVersion('1.0'):
+ api_version = '0.11'
else:
- api_version = '0.11'
+ api_version = '1.0'
if api_version == "0.10" and "DISPLAY" in os.environ:
from matplotlib import rcParams
@@ -42,13 +44,18 @@
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
elif api_version == "0.10":
ip_shell = IPython.Shell.IPShellMatplotlib(user_ns=namespace)
-elif api_version == "0.11":
- from IPython.frontend.terminal.interactiveshell import TerminalInteractiveShell
+else:
+ if api_version == "0.11":
+ from IPython.frontend.terminal.interactiveshell import \
+ TerminalInteractiveShell
+ elif api_version == "1.0":
+ from IPython.terminal.interactiveshell import TerminalInteractiveShell
+ else:
+ raise RuntimeError
ip_shell = TerminalInteractiveShell(user_ns=namespace, banner1 = doc,
display_banner = True)
if "DISPLAY" in os.environ: ip_shell.enable_pylab(import_all=False)
-else:
- raise RuntimeError
+
# The rest is a modified version of the IPython default profile code
@@ -77,7 +84,7 @@
ip = ip_shell.IP.getapi()
try_next = IPython.ipapi.TryNext
kwargs = dict(sys_exit=1, banner=doc)
-elif api_version == "0.11":
+elif api_version in ("0.11", "1.0"):
ip = ip_shell
try_next = IPython.core.error.TryNext
kwargs = dict()
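
The scripts/iyt change above dispatches on the installed IPython version
using LooseVersion comparisons. A self-contained sketch of the same selection
logic (the version strings in the asserts are examples only):

    from distutils.version import LooseVersion

    def pick_api_version(ipython_version):
        # Mirrors the ordering in scripts/iyt: <= 0.10, then <= 1.0, then newer.
        if LooseVersion(ipython_version) <= LooseVersion('0.10'):
            return '0.10'
        elif LooseVersion(ipython_version) <= LooseVersion('1.0'):
            return '0.11'
        return '1.0'

    assert pick_api_version('0.10') == '0.10'
    assert pick_api_version('0.13.1') == '0.11'
    assert pick_api_version('1.1.0') == '1.0'
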
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/__init__.py
--- a/yt/__init__.py
+++ b/yt/__init__.py
@@ -96,7 +96,7 @@
if answer_big_data:
nose_argv.append('--answer-big-data')
log_suppress = ytcfg.getboolean("yt","suppressStreamLogging")
- ytcfg["yt","suppressStreamLogging"] = 'True'
+ ytcfg.set("yt","suppressStreamLogging", 'True')
initial_dir = os.getcwd()
yt_file = os.path.abspath(__file__)
yt_dir = os.path.dirname(yt_file)
@@ -105,4 +105,4 @@
nose.run(argv=nose_argv)
finally:
os.chdir(initial_dir)
- ytcfg["yt","suppressStreamLogging"] = log_suppress
+ ytcfg.set("yt","suppressStreamLogging", str(log_suppress))
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- /dev/null
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -0,0 +1,809 @@
+from scipy import optimize
+import numpy as na
+import h5py
+from yt.analysis_modules.absorption_spectrum.absorption_line \
+ import voigt
+
+
+def generate_total_fit(x, fluxData, orderFits, speciesDicts,
+ minError=1E-5, complexLim=.999,
+ fitLim=.99, minLength=3,
+ maxLength=1000, splitLim=.99,
+ output_file=None):
+
+ """
+ This function is designed to fit an absorption spectrum by breaking
+ the spectrum up into absorption complexes, and iteratively adding
+ and optimizing voigt profiles to each complex.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ 1d array of wavelengths
+ fluxData : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ orderFits : list
+ list of the names of the species in the order that they
+ should be fit. Names should correspond to the names of the species
+ given in speciesDicts. (ex: ['lya','OVI'])
+ speciesDicts : dictionary
+        Dictionary of dictionaries. Top level keys should be the names of
+        all the species given
+ in orderFits. The entries should be dictionaries containing all
+ relevant parameters needed to create an absorption line of a given
+ species (f,Gamma,lambda0) as well as max and min values for parameters
+ to be fit
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+    fitLim : float, optional
+        Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.98, a region where all the flux is between 1.0 and
+        .99 will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+ output_file : string, optional
+ location to save the results of the fit.
+
+ Returns
+ -------
+ allSpeciesLines : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line. (ie: N[0],b[0],z[0] will create a line that
+ fits some part of the absorption spectrum). 'group#' is a similar list
+ but identifies which absorbing complex each line belongs to. Lines
+ with the same group# were fit at the same time. group#'s do not
+ correlate between species (ie: an lya line with group number 1 and
+ an OVI line with group number 1 were not fit together and do
+ not necessarily correspond to the same region)
+ yFit : (N) ndarray
+ array of flux corresponding to the combination of all fitted
+ absorption profiles. Same size as x.
+ """
+
+ #Empty dictionary for fitted lines
+ allSpeciesLines = {}
+
+ #Wavelength of beginning of array, wavelength resolution
+ x0,xRes=x[0],x[1]-x[0]
+
+ #Empty fit without any lines
+ yFit = na.ones(len(fluxData))
+
+ #Find all regions where lines/groups of lines are present
+ cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
+ complexLim=complexLim, minLength=minLength,
+ maxLength=maxLength, splitLim=splitLim)
+
+ #Fit all species one at a time in given order from low to high wavelength
+ for species in orderFits:
+ speciesDict = speciesDicts[species]
+ speciesLines = {'N':na.array([]),
+ 'b':na.array([]),
+ 'z':na.array([]),
+ 'group#':na.array([])}
+
+ #Set up wavelengths for species
+ initWl = speciesDict['wavelength'][0]
+
+ for b_i,b in enumerate(cBounds):
+ xBounded=x[b[1]:b[2]]
+ yDatBounded=fluxData[b[1]:b[2]]
+ yFitBounded=yFit[b[1]:b[2]]
+
+ #Find init redshift
+ z=(xBounded[yDatBounded.argmin()]-initWl)/initWl
+
+ #Check if any flux at partner sites
+ if not _line_exists(speciesDict['wavelength'],
+ fluxData,z,x0,xRes,fitLim):
+ continue
+
+ #Fit Using complex tools
+ newLinesP,flag=_complex_fit(xBounded,yDatBounded,yFitBounded,
+ z,fitLim,minError*(b[2]-b[1]),speciesDict)
+
+ #Check existence of partner lines if applicable
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
+
+ #If flagged as a bad fit, species is lyman alpha,
+ # and it may be a saturated line, use special tools
+ if flag and species=='lya' and min(yDatBounded)<.1:
+ newLinesP=_large_flag_fit(xBounded,yDatBounded,
+ yFitBounded,z,speciesDict,
+                        fitLim,minError*(b[2]-b[1]))
+
+ #Adjust total current fit
+ yFit=yFit*_gen_flux_lines(x,newLinesP,speciesDict)
+
+ #Add new group to all fitted lines
+ if na.size(newLinesP)>0:
+ speciesLines['N']=na.append(speciesLines['N'],newLinesP[:,0])
+ speciesLines['b']=na.append(speciesLines['b'],newLinesP[:,1])
+ speciesLines['z']=na.append(speciesLines['z'],newLinesP[:,2])
+ groupNums = b_i*na.ones(na.size(newLinesP[:,0]))
+ speciesLines['group#']=na.append(speciesLines['group#'],groupNums)
+
+ allSpeciesLines[species]=speciesLines
+
+ if output_file:
+ _output_fit(allSpeciesLines, output_file)
+
+ return (allSpeciesLines,yFit)
+
+def _complex_fit(x, yDat, yFit, initz, minSize, errBound, speciesDict,
+ initP=None):
+ """ Fit an absorption complex by iteratively adding and optimizing
+ voigt profiles.
+
+    A complex is defined as a region where some number of lines may be
+    present, i.e. a region of nonzero absorption. Lines are iteratively
+    added and optimized until the least-squares difference between the flux
+    generated from the optimized parameters and the desired flux profile
+    falls below the error bound.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+    yDat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+    minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+    errBound : float
+ maximum total error allowed for an acceptable fit
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+    initP : (M,3) ndarray, optional
+ initial guess to try for line parameters to fit the region. Used
+ by large_flag_fit. Default = None, and initial guess generated
+ automatically.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ Array of best parameters if a good enough fit is found in
+ the form [[N1,b1,z1], [N2,b2,z2],...]
+ flag : bool
+        boolean value indicating whether the fit failed (True if unsuccessful)
+ """
+
+ #Setup initial line guesses
+    if initP is None: #Regular fit
+ initP = [0,0,0]
+ if min(yDat)<.5: #Large lines get larger initial guess
+ initP[0] = 10**16
+ elif min(yDat)>.9: #Small lines get smaller initial guess
+ initP[0] = 10**12.5
+ else:
+ initP[0] = speciesDict['init_N']
+ initP[1] = speciesDict['init_b']
+ initP[2]=initz
+ initP=na.array([initP])
+
+ linesP = initP
+
+ #For generating new z guesses
+ wl0 = speciesDict['wavelength'][0]
+
+ #Check if first line exists still
+ if min(yDat-yFit+1)>minSize:
+ return [],False
+
+ #Values to proceed through first run
+ errSq,prevErrSq=1,1000
+
+ while True:
+ #Initial parameter guess from joining parameters from all lines
+ # in lines into a single array
+ initP = linesP.flatten()
+
+ #Optimize line
+ fitP,success=optimize.leastsq(_voigt_error,initP,
+ args=(x,yDat,yFit,speciesDict),
+ epsfcn=1E-10,maxfev=1000)
+
+ #Set results of optimization
+ linesP = na.reshape(fitP,(-1,3))
+
+ #Generate difference between current best fit and data
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+
+ #Sum to get idea of goodness of fit
+ errSq=sum(dif**2)
+
+ #If good enough, break
+ if errSq < errBound:
+ break
+
+ #If last fit was worse, reject the last line and revert to last fit
+ if errSq > prevErrSq*10:
+            #If it's still very bad, cut losses and try the flag fit tools
+ if prevErrSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+                #Revert to the accepted fit from the previous iteration
+                linesP = prevLinesP
+                yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+                break
+
+ #If too many lines
+ if na.shape(linesP)[0]>8 or na.size(linesP)+3>=len(x):
+            #If it's fittable by the flag tools and still bad, use flag tools
+ if errSq >1E2*errBound and speciesDict['name']=='HI lya':
+ return [],True
+ else:
+ break
+
+ #Store previous data in case reject next fit
+ prevErrSq = errSq
+ prevLinesP = linesP
+
+
+ #Set up initial condition for new line
+ newP = [0,0,0]
+ if min(dif)<.1:
+ newP[0]=10**12
+ elif min(dif)>.9:
+ newP[0]=10**16
+ else:
+ newP[0]=10**14
+ newP[1] = speciesDict['init_b']
+ newP[2]=(x[dif.argmax()]-wl0)/wl0
+ linesP=na.append(linesP,[newP],axis=0)
+
+
+ #Check the parameters of all lines to see if they fall in an
+ # acceptable range, as given in dict ref
+ remove=[]
+ for i,p in enumerate(linesP):
+ check=_check_params(na.array([p]),speciesDict)
+ if check:
+ remove.append(i)
+ linesP = na.delete(linesP,remove,axis=0)
+
+ return linesP,False
+
+def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
+ """
+ Attempts to more robustly fit saturated lyman alpha regions that have
+ not converged to satisfactory fits using the standard tools.
+
+ Uses a preselected sample of a wide range of initial parameter guesses
+ designed to fit saturated lines (see get_test_lines).
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelength
+    yDat : (N) ndarray
+ array of desired flux profile to be fitted for the wavelength
+ space given by x. Same size as x.
+ yFit : (N) ndarray
+ array of flux profile fitted for the wavelength
+ space given by x already. Same size as x.
+ initz : float
+ redshift to try putting first line at
+ (maximum absorption for region)
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+    minSize : float
+ minimum absorption allowed for a line to still count as a line
+ given in normalized flux (ie: for minSize=.9, only lines with minimum
+ flux less than .9 will be fitted)
+    errBound : float
+ maximum total error allowed for an acceptable fit
+
+ Returns
+ -------
+ bestP : (3,) ndarray
+        array of best parameters, if a good enough fit is found, in
+        the form [[N1,b1,z1], [N2,b2,z2],...]; an empty list otherwise
+ """
+
+ #Set up some initial line guesses
+ lineTests = _get_test_lines(initz)
+
+ #Keep track of the lowest achieved error
+ bestError = 1000
+
+ #Iterate through test line guesses
+ for initLines in lineTests:
+ if initLines[1,0]==0:
+ initLines = na.delete(initLines,1,axis=0)
+
+ #Do fitting with initLines as first guess
+ linesP,flag=_complex_fit(x,yDat,yFit,initz,
+ minSize,errBound,speciesDict,initP=initLines)
+
+ #Find error of last fit
+ yNewFit=_gen_flux_lines(x,linesP,speciesDict)
+ dif = yFit*yNewFit-yDat
+ errSq=sum(dif**2)
+
+ #If error lower, keep track of the lines used to make that fit
+ if errSq < bestError:
+ bestError = errSq
+ bestP = linesP
+
+ if bestError>10*errBound*len(x):
+ return []
+ else:
+ return bestP
+
+def _get_test_lines(initz):
+ """
+    Returns a 3d numpy array of lines to test as initial guesses for
+    difficult-to-fit, saturated lyman alpha absorbers.
+
+    The array is 3d because the first dimension indexes separate initial
+    guesses, the second dimension holds multiple lines for the same guess
+    (trying a broad line plus a saturated line), and the third dimension
+    contains the 3 fit parameters (N,b,z).
+
+ Parameters
+ ----------
+ initz : float
+ redshift to give all the test lines
+
+ Returns
+ -------
+    testP : (10,2,3) ndarray
+ numpy array of the form
+ [[[N1a,b1a,z1a], [N1b,b1b,z1b]], [[N2a,b2,z2a],...] ...]
+ """
+
+ #Set up a bunch of empty lines
+ testP = na.zeros((10,2,3))
+
+ testP[0,0,:]=[1E18,20,initz]
+ testP[1,0,:]=[1E18,40,initz]
+ testP[2,0,:]=[1E16,5, initz]
+ testP[3,0,:]=[1E16,20,initz]
+ testP[4,0,:]=[1E16,80,initz]
+
+ testP[5,0,:]=[1E18,20,initz]
+ testP[6,0,:]=[1E18,40,initz]
+ testP[7,0,:]=[1E16,5, initz]
+ testP[8,0,:]=[1E16,20,initz]
+ testP[9,0,:]=[1E16,80,initz]
+
+ testP[5,1,:]=[1E13,100,initz]
+ testP[6,1,:]=[1E13,100,initz]
+ testP[7,1,:]=[1E13,100,initz]
+ testP[8,1,:]=[1E13,100,initz]
+ testP[9,1,:]=[1E13,100,initz]
+
+ return testP
+
+def _get_bounds(z, b, wl, x0, xRes):
+ """
+    Gets the indices of the wavelength range containing the redshifted
+    wavelength wl, sized to match some initial wavelength range.
+
+ Used for checking if species with multiple lines (as in the OVI doublet)
+ fit all lines appropriately.
+
+ Parameters
+ ----------
+ z : float
+ redshift
+ b : (3) ndarray/list
+ initial bounds in form [i0,i1,i2] where i0 is the index of the
+ minimum flux for the complex, i1 is index of the lower wavelength
+ edge of the complex, and i2 is the index of the higher wavelength
+ edge of the complex.
+ wl : float
+ unredshifted wavelength of the peak of the new region
+ x0 : float
+        wavelength at index 0
+ xRes : float
+ difference in wavelength for two consecutive indices
+
+ Returns
+ -------
+ indices : (2) tuple
+ Tuple (i1,i2) where i1 is the index of the lower wavelength bound of
+ the new region and i2 is the index of the higher wavelength bound of
+ the new region
+ """
+
+ r=[-b[1]+100+b[0],b[2]+100-b[0]]
+ redWl = (z+1)*wl
+ iRedWl=int((redWl-x0)/xRes)
+ indices = (iRedWl-r[0],iRedWl+r[1])
+
+ return indices
+
+def _remove_unaccepted_partners(linesP, x, y, b, errBound,
+ x0, xRes, speciesDict):
+ """
+ Given a set of parameters [N,b,z] that form multiple lines for a given
+ species (as in the OVI doublet), remove any set of parameters where
+ not all transition wavelengths have a line that matches the fit.
+
+ (ex: if a fit is determined based on the first line of the OVI doublet,
+ but the given parameters give a bad fit of the wavelength space of
+ the second line then that set of parameters is removed from the array
+ of line parameters.)
+
+ Parameters
+ ----------
+ linesP : (3,) ndarray
+ array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ wavelength array [nm]
+ y : (N) ndarray
+ normalized flux array of original data
+ b : (3) tuple/list/ndarray
+ indices that give the bounds of the original region so that another
+ region of similar size can be used to determine the goodness
+ of fit of the other wavelengths
+ errBound : float
+ size of the error that is appropriate for a given region,
+ adjusted to account for the size of the region.
+
+ Returns
+ -------
+ linesP : (3,) ndarray
+ array similar to linesP that only contains lines with
+ appropriate fits of all transition wavelengths.
+ """
+
+ #List of lines to remove
+ removeLines=[]
+
+ #Iterate through all sets of line parameters
+ for i,p in enumerate(linesP):
+
+ #iterate over all transition wavelengths
+ for wl in speciesDict['wavelength']:
+
+ #Get the bounds of a similar sized region around the
+ # appropriate wavelength, and then get the appropriate
+ # region of wavelength and flux
+ lb = _get_bounds(p[2],b,wl,x0,xRes)
+ xb,yb=x[lb[0]:lb[1]],y[lb[0]:lb[1]]
+
+ #Generate a fit and find the difference to data
+ yFitb=_gen_flux_lines(xb,na.array([p]),speciesDict)
+ dif =yb-yFitb
+
+            #Only counts as an error if the line is too big
+ dif = [k for k in dif if k>0]
+ err = sum(dif)
+
+ #If the fit is too bad then add the line to list of removed lines
+ if err > errBound*1E2:
+ removeLines.append(i)
+ break
+
+ #Remove all bad line fits
+ linesP = na.delete(linesP,removeLines,axis=0)
+
+ return linesP
+
+
+
+def _line_exists(wavelengths, y, z, x0, xRes,fluxMin):
+ """For a group of lines finds if the there is some change in flux greater
+ than some minimum at the same redshift with different initial wavelengths
+
+ Parameters
+ ----------
+ wavelengths : (N) ndarray
+ array of initial wavelengths to check
+    y : (N) ndarray
+        flux array to check
+    z : float
+        redshift at which to check each wavelength
+ x0 : float
+ wavelength of the first value in y
+ xRes : float
+ difference in wavelength between consecutive cells in flux array
+ fluxMin : float
+ maximum flux to count as a line existing.
+
+ Returns
+ -------
+
+ flag : boolean
+ value indicating whether all lines exist. True if all lines exist
+ """
+
+ #Iterate through initial wavelengths
+ for wl in wavelengths:
+ #Redshifted wavelength
+ redWl = (z+1)*wl
+
+ #Index of the redshifted wavelength
+ indexRedWl = (redWl-x0)/xRes
+
+ #Check if surpasses minimum absorption bound
+ if y[int(indexRedWl)]>fluxMin:
+ return False
+
+ return True
+
+def _find_complexes(x, yDat, complexLim=.999, fitLim=.99,
+ minLength =3, maxLength=1000, splitLim=.99):
+ """Breaks up the wavelength space into groups
+ where there is some absorption.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ array of wavelengths
+ yDat : (N) ndarray
+ array of flux corresponding to the wavelengths given
+ in x. (needs to be the same size as x)
+ complexLim : float, optional
+ Maximum flux to start the edge of an absorption complex. Different
+ from fitLim because it decides extent of a complex rather than
+ whether or not a complex is accepted.
+    fitLim : float, optional
+        Maximum flux where the level of absorption will trigger
+        identification of the region as an absorption complex. Default = .99.
+        (ex: for fitLim=.98, a region where all the flux is between 1.0 and
+        .99 will not be separated out to be fit as an absorbing complex, but
+        a region that contains a point where the flux is .97 will be fit
+        as an absorbing complex.)
+ minLength : int, optional
+ number of cells required for a complex to be included.
+ default is 3 cells.
+ maxLength : int, optional
+ number of cells required for a complex to be split up. Default
+ is 1000 cells.
+ splitLim : float, optional
+ if attempting to split a region for being larger than maxlength
+ the point of the split must have a flux greater than splitLim
+ (ie: absorption greater than splitLim). Default= .99.
+
+ Returns
+ -------
+    cBounds : (3,) list
+        list of bounds in the form [[i0,i1,i2],...] where i0 is the
+        index of the minimum flux (peak absorption) for a complex, i1 is the
+        index of the beginning of the complex, and i2 is the index of the end
+        of the complex. Indexes refer to the indices of x and yDat.
+ """
+
+ #Initialize empty list of bounds
+ cBounds=[]
+
+ #Iterate through cells of flux
+ i=0
+ while (i<len(x)):
+
+ #Start tracking at a region that surpasses flux of edge
+ if yDat[i]<complexLim:
+
+ #Iterate through until reach next edge
+ j=0
+            while i+j < len(x) and yDat[i+j]<complexLim: j=j+1
+
+ #Check if the complex is big enough
+ if j >minLength:
+
+ #Check if there is enough absorption for the complex to
+ # be included
+ cPeak = yDat[i:i+j].argmin()
+ if yDat[cPeak+i]<fitLim:
+ cBounds.append([cPeak+i,i,i+j])
+
+ i=i+j
+ i=i+1
+
+ i=0
+ #Iterate through the bounds
+ while i < len(cBounds):
+ b=cBounds[i]
+
+ #Check if the region needs to be divided
+ if b[2]-b[1]>maxLength:
+
+ #Find the minimum absorption in the middle two quartiles of
+ # the large complex
+ q=(b[2]-b[1])/4
+ cut = yDat[b[1]+q:b[2]-q].argmax()+b[1]+q
+
+ #Only break it up if the minimum absorption is actually low enough
+ if yDat[cut]>splitLim:
+
+ #Get the new two peaks
+ b1Peak = yDat[b[1]:cut].argmin()+b[1]
+ b2Peak = yDat[cut:b[2]].argmin()+cut
+
+ #add the two regions separately
+ cBounds.insert(i+1,[b1Peak,b[1],cut])
+ cBounds.insert(i+2,[b2Peak,cut,b[2]])
+
+ #Remove the original region
+ cBounds.pop(i)
+ i=i+1
+ i=i+1
+
+ return cBounds
+
+def _gen_flux_lines(x, linesP, speciesDict):
+ """
+ Calculates the normalized flux for a region of wavelength space
+ generated by a set of absorption lines.
+
+ Parameters
+ ----------
+ x : (N) ndarray
+ Array of wavelength
+ linesP: (3,) ndarray
+ Array giving sets of line parameters in
+ form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ Dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+
+ Returns
+ -------
+ flux : (N) ndarray
+ Array of normalized flux generated by the line parameters
+ given in linesP over the wavelength space given in x. Same size as x.
+ """
+ y=0
+ for p in linesP:
+ for i in range(speciesDict['numLines']):
+ f=speciesDict['f'][i]
+ g=speciesDict['Gamma'][i]
+ wl=speciesDict['wavelength'][i]
+ y = y+ _gen_tau(x,p,f,g,wl)
+ flux = na.exp(-y)
+ return flux
+
+def _gen_tau(t, p, f, Gamma, lambda_unshifted):
+ """This calculates a flux distribution for given parameters using the yt
+ voigt profile generator"""
+ N,b,z= p
+
+ #Calculating quantities
+ tau_o = 1.4973614E-15*N*f*lambda_unshifted/b
+ a=7.95774715459E-15*Gamma*lambda_unshifted/b
+ x=299792.458/b*(lambda_unshifted*(1+z)/t-1)
+
+    H = voigt(a,x)
+
+ tau = tau_o*H
+
+ return tau
+
+def _voigt_error(pTotal, x, yDat, yFit, speciesDict):
+ """
+ Gives the error of each point used to optimize the fit of a group
+ of absorption lines to a given flux profile.
+
+ If the parameters are not in the acceptable range as defined
+ in speciesDict, the first value of the error array will
+ contain a large value (999), to prevent the optimizer from running
+ into negative number problems.
+
+ Parameters
+ ----------
+ pTotal : (3,) ndarray
+ Array with form [[N1, b1, z1], ...]
+ x : (N) ndarray
+ array of wavelengths [nm]
+ yDat : (N) ndarray
+ desired normalized flux from fits of lines in wavelength
+ space given by x
+ yFit : (N) ndarray
+ previous fit over the wavelength space given by x.
+ speciesDict : dictionary
+ dictionary containing all relevant parameters needed
+ to create an absorption line of a given species (f,Gamma,lambda0)
+ as well as max and min values for parameters to be fit
+
+ Returns
+ -------
+ error : (N) ndarray
+ the difference between the fit generated by the parameters
+ given in pTotal multiplied by the previous fit and the desired
+ flux profile, w/ first index modified appropriately for bad
+ parameter choices
+ """
+
+ pTotal.shape = (-1,3)
+ yNewFit = _gen_flux_lines(x,pTotal,speciesDict)
+
+ error = yDat-yFit*yNewFit
+ error[0] = _check_params(pTotal,speciesDict)
+
+ return error
+
+def _check_params(p, speciesDict):
+ """
+ Check to see if any of the parameters in p fall outside the range
+ given in speciesDict.
+
+ Parameters
+ ----------
+ p : (3,) ndarray
+ array with form [[N1, b1, z1], ...]
+ speciesDict : dictionary
+ dictionary with properties giving the max and min
+ values appropriate for each parameter N,b, and z.
+
+ Returns
+ -------
+ check : int
+ 0 if all values are fine
+ 999 if any values fall outside acceptable range
+ """
+ check = 0
+ if any(p[:,0] > speciesDict['maxN']) or\
+ any(p[:,0] < speciesDict['minN']) or\
+ any(p[:,1] > speciesDict['maxb']) or\
+ any(p[:,1] < speciesDict['minb']) or\
+ any(p[:,2] > speciesDict['maxz']) or\
+ any(p[:,2] < speciesDict['minz']):
+ check = 999
+ return check
+
+
+def _output_fit(lineDic, file_name = 'spectrum_fit.h5'):
+ """
+ This function is designed to output the parameters of the series
+ of lines used to fit an absorption spectrum.
+
+    The dataset contains entries in the form species/N, species/b,
+    species/z, and species/complex. The ith entry in each of the datasets
+ is the fitted parameter for the ith line fitted to the spectrum for
+ the given species. The species names come from the fitted line
+ dictionary.
+
+ Parameters
+ ----------
+ lineDic : dictionary
+ Dictionary of dictionaries representing the fit lines.
+ Top level keys are the species given in orderFits and the corresponding
+ entries are dictionaries with the keys 'N','b','z', and 'group#'.
+ Each of these corresponds to a list of the parameters for every
+ accepted fitted line.
+    file_name : string, optional
+ Name of the file to output fit to. Default = 'spectrum_fit.h5'
+
+ """
+ f = h5py.File(file_name, 'w')
+ for ion, params in lineDic.iteritems():
+ f.create_dataset("{0}/N".format(ion),data=params['N'])
+ f.create_dataset("{0}/b".format(ion),data=params['b'])
+ f.create_dataset("{0}/z".format(ion),data=params['z'])
+ f.create_dataset("{0}/complex".format(ion),data=params['group#'])
+ print 'Writing spectrum fit to {0}'.format(file_name)
+
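
The _find_complexes routine in the new module scans the flux array for
contiguous runs below complexLim and keeps those that are long and deep
enough. A simplified, self-contained toy version of that scan on synthetic
data (no splitting of oversized regions; all numbers are made up):

    import numpy as np

    def find_complexes_simple(flux, complexLim=0.999, fitLim=0.99, minLength=3):
        bounds = []
        i, n = 0, len(flux)
        while i < n:
            if flux[i] < complexLim:
                # Walk to the far edge of the dip.
                j = i
                while j < n and flux[j] < complexLim:
                    j += 1
                # Keep it only if it is long enough and deep enough.
                if j - i > minLength:
                    peak = i + flux[i:j].argmin()
                    if flux[peak] < fitLim:
                        bounds.append([peak, i, j])
                i = j
            i += 1
        return bounds

    # A flat continuum with a single absorption dip:
    x = np.linspace(1210.0, 1220.0, 200)
    flux = 1.0 - 0.5 * np.exp(-(x - 1215.67)**2 / 0.05)
    print(find_complexes_simple(flux))   # one [peak, start, end] triple
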
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/analysis_modules/absorption_spectrum/api.py
--- a/yt/analysis_modules/absorption_spectrum/api.py
+++ b/yt/analysis_modules/absorption_spectrum/api.py
@@ -30,3 +30,6 @@
from .absorption_spectrum import \
AbsorptionSpectrum
+
+from .absorption_spectrum_fit import \
+ generate_total_fit
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -28,7 +28,7 @@
import ConfigParser, os, os.path, types
ytcfgDefaults = dict(
- serialize = 'True',
+ serialize = 'False',
onlydeserialize = 'False',
timefunctions = 'False',
logfile = 'False',
@@ -62,7 +62,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold008',
+ gold_standard_filename = 'gold010',
local_standard_filename = 'local001',
sketchfab_api_key = 'None',
thread_field_detection = 'False'
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -89,3 +89,6 @@
from particle_trajectories import \
ParticleTrajectoryCollection
+
+from particle_filters import \
+ particle_filter
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -498,10 +498,13 @@
def _fill_fields(self, fields):
output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
for field in fields]
+ domain_dims = self.pf.domain_dimensions.astype("int64") \
+ * self.pf.refine_by**self.level
for chunk in self._data_source.chunks(fields, "io"):
input_fields = [chunk[field] for field in fields]
fill_region(input_fields, output_fields, self.level,
- self.global_startindex, chunk.icoords, chunk.ires)
+ self.global_startindex, chunk.icoords, chunk.ires,
+ domain_dims, self.pf.refine_by)
for name, v in zip(fields, output_fields):
self[name] = v
@@ -654,13 +657,14 @@
def _fill_fields(self, fields):
ls = self._initialize_level_state(fields)
for level in range(self.level + 1):
- tot = 0
+ domain_dims = self.pf.domain_dimensions.astype("int64") \
+ * self.pf.refine_by**level
for chunk in ls.data_source.chunks(fields, "io"):
chunk[fields[0]]
input_fields = [chunk[field] for field in fields]
- tot += fill_region(input_fields, ls.fields, ls.current_level,
+ fill_region(input_fields, ls.fields, ls.current_level,
ls.global_startindex, chunk.icoords,
- chunk.ires)
+ chunk.ires, domain_dims, self.pf.refine_by)
self._update_level_state(ls)
for name, v in zip(fields, ls.fields):
if self.level > 0: v = v[1:-1,1:-1,1:-1]
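
Both hunks above compute the effective domain shape at the target refinement
level before calling fill_region. The arithmetic is a simple scaling, sketched
here in isolation (the 32^3 root grid and refine_by=2 are illustrative
values):

    import numpy as np

    domain_dimensions = np.array([32, 32, 32])
    refine_by = 2
    for level in range(3):
        domain_dims = domain_dimensions.astype("int64") * refine_by**level
        print(level, domain_dims)
    # 0 [32 32 32]
    # 1 [64 64 64]
    # 2 [128 128 128]
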
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -414,10 +414,12 @@
def blocks(self):
for io_chunk in self.chunks([], "io"):
for i,chunk in enumerate(self.chunks([], "spatial", ngz = 0)):
- g = self._current_chunk.objs[0]
- mask = g._get_selector_mask(self.selector)
- if mask is None: continue
- yield g, mask
+ # For grids this will be a grid object, and for octrees it will
+ # be an OctreeSubset. Note that we delegate to the sub-object.
+ o = self._current_chunk.objs[0]
+ for b, m in o.select_blocks(self.selector):
+ if m is None: continue
+ yield b, m
class GenerationInProgress(Exception):
def __init__(self, fields):
@@ -436,7 +438,9 @@
@property
def selector(self):
if self._selector is not None: return self._selector
- sclass = getattr(yt.geometry.selection_routines,
+ s_module = getattr(self, '_selector_module',
+ yt.geometry.selection_routines)
+ sclass = getattr(s_module,
"%s_selector" % self._type_name, None)
if sclass is None:
raise YTDataSelectorNotImplemented(self._type_name)
@@ -459,7 +463,9 @@
for field in itertools.cycle(fields_to_get):
if inspected >= len(fields_to_get): break
inspected += 1
- if field not in self.pf.field_dependencies: continue
+ fd = self.pf.field_dependencies.get(field, None) or \
+ self.pf.field_dependencies.get(field[1], None)
+ if fd is None: continue
fd = self.pf.field_dependencies[field]
requested = self._determine_fields(list(set(fd.requested)))
deps = [d for d in requested if d not in fields_to_get]
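
The blocks property now delegates to each chunk object's own select_blocks
generator, so grids and octree subsets can each define how they decompose
into blocks. A toy sketch of that delegation pattern (the class names and
mask strings are illustrative, not yt's):

    class GridLike(object):
        def select_blocks(self, selector):
            # A patch grid is itself a single block.
            yield self, "grid-mask"

    class OctreeLike(object):
        def select_blocks(self, selector):
            # An octree subset yields one block per oct.
            for i in range(3):
                yield ("oct", i), "oct-mask-%d" % i

    for obj in (GridLike(), OctreeLike()):
        for block, mask in obj.select_blocks(selector=None):
            if mask is None:
                continue
            print(block, mask)
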
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -277,7 +277,14 @@
mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
- finfo = self.pf._get_field_info(*field)
+ finfo = self.pf._get_field_info(*field)
+ # For those cases where we are guessing the field type, we will
+ # need to re-update -- otherwise, our item will always not have the
+ # field type. This can lead to, for instance, "unknown" particle
+ # types not getting correctly identified.
+ # Note that the *only* way this works is if we also fix our field
+ # dependencies during checking. Bug #627 talks about this.
+ item = self.pf._last_freq
else:
FI = getattr(self.pf, "field_info", FieldInfo)
if item in FI:
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -492,12 +492,16 @@
if vals is None: return
return vals.reshape(self.ActiveDimensions, order="C")
+ def select_blocks(self, selector):
+ mask = self._get_selector_mask(selector)
+ yield self, mask
+
def _get_selector_mask(self, selector):
- if id(selector) == self._last_selector_id:
+ if hash(selector) == self._last_selector_id:
mask = self._last_mask
else:
self._last_mask = mask = selector.fill_mask(self)
- self._last_selector_id = id(selector)
+ self._last_selector_id = hash(selector)
if mask is None:
self._last_count = 0
else:
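
grid_patch.py now keys its cached selector mask on hash(selector) rather than
id(selector): id() values can be recycled after garbage collection, and a
stable __hash__ also lets equivalent selectors share a cache entry. A sketch
of the caching pattern (the Selector class here is hypothetical):

    class Selector(object):
        def __init__(self, kind, radius):
            self.kind, self.radius = kind, radius
        def __hash__(self):
            # Equal parameters -> equal hash, even for distinct objects.
            return hash((self.kind, self.radius))

    class Patch(object):
        _last_selector_id = None
        _last_mask = None
        def get_mask(self, selector):
            if hash(selector) == self._last_selector_id:
                return self._last_mask              # cache hit
            self._last_mask = "mask(%s, %s)" % (selector.kind, selector.radius)
            self._last_selector_id = hash(selector)
            return self._last_mask

    p = Patch()
    a = Selector("sphere", 1.0)
    b = Selector("sphere", 1.0)    # equivalent selector, different object
    assert p.get_mask(a) is p.get_mask(b)
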
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -91,16 +91,32 @@
return tr
return tr
+ @property
+ def nz(self):
+ return self._num_zones + 2*self._num_ghost_zones
+
def _reshape_vals(self, arr):
if len(arr.shape) == 4: return arr
- nz = self._num_zones + 2*self._num_ghost_zones
+ nz = self.nz
n_oct = arr.shape[0] / (nz**3.0)
- arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+ if arr.size == nz*nz*nz*n_oct:
+ arr = arr.reshape((nz, nz, nz, n_oct), order="F")
+ elif arr.size == nz*nz*nz*n_oct * 3:
+ arr = arr.reshape((nz, nz, nz, n_oct, 3), order="F")
+ else:
+ raise RuntimeError
arr = np.asfortranarray(arr)
return arr
_domain_ind = None
+ def select_blocks(self, selector):
+ mask = self.oct_handler.mask(selector)
+ mask = self._reshape_vals(mask)
+ slicer = OctreeSubsetBlockSlice(self)
+ for i, sl in slicer:
+ yield sl, mask[:,:,:,i]
+
@property
def domain_ind(self):
if self._domain_ind is None:
@@ -113,12 +129,17 @@
cls = getattr(particle_deposit, "deposit_%s" % method, None)
if cls is None:
raise YTParticleDepositionNotImplemented(method)
- nvals = (2, 2, 2, (self.domain_ind >= 0).sum())
+ nz = self.nz
+ nvals = (nz, nz, nz, (self.domain_ind >= 0).sum())
op = cls(nvals) # We allocate number of zones, not number of octs
op.initialize()
- mylog.debug("Depositing %s particles into %s Octs",
- positions.shape[0], nvals[-1])
- op.process_octree(self.oct_handler, self.domain_ind, positions, fields,
+ mylog.debug("Depositing %s (%s^3) particles into %s Octs",
+ positions.shape[0], positions.shape[0]**0.3333333, nvals[-1])
+ pos = np.array(positions, dtype="float64")
+ # We should not need the following if we know in advance all our fields
+ # need no casting.
+ fields = [np.asarray(f, dtype="float64") for f in fields]
+ op.process_octree(self.oct_handler, self.domain_ind, pos, fields,
self.domain_id, self._domain_offset)
vals = op.finalize()
if vals is None: return
@@ -127,7 +148,7 @@
def select_icoords(self, dobj):
d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -135,7 +156,7 @@
def select_fcoords(self, dobj):
d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -143,7 +164,7 @@
def select_fwidth(self, dobj):
d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
domain_id = self.domain_id)
return tr
@@ -151,7 +172,7 @@
def select_ires(self, dobj):
d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
num_octs = self._num_octs)
- self._num_octs = d.shape[0] / 8
+ self._num_octs = d.shape[0] / (self.nz**3)
tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
domain_id = self.domain_id)
return tr
@@ -162,7 +183,7 @@
return n
def count(self, selector):
- if id(selector) == self._last_selector_id:
+ if hash(selector) == self._last_selector_id:
if self._last_mask is None: return 0
return self._last_mask.sum()
self.select(selector)
@@ -182,7 +203,7 @@
# This is some subset of an octree. Note that the sum of subsets of an
# octree may multiply include data files. While we can attempt to mitigate
# this, it's unavoidable for many types of data storage on disk.
- _type_name = 'particle_octree_subset'
+ _type_name = 'indexed_octree_subset'
_con_args = ('data_files', 'pf', 'min_ind', 'max_ind')
domain_id = -1
def __init__(self, base_region, data_files, pf, min_ind = 0, max_ind = 0):
@@ -203,3 +224,49 @@
self.base_region = base_region
self.base_selector = base_region.selector
+class OctreeSubsetBlockSlice(object):
+ def __init__(self, octree_subset):
+ self.ind = None
+ self.octree_subset = octree_subset
+ # Cache some attributes
+ nz = octree_subset.nz
+ self.ActiveDimensions = np.array([nz,nz,nz], dtype="int64")
+ for attr in ["ires", "icoords", "fcoords", "fwidth"]:
+ v = getattr(octree_subset, attr)
+ setattr(self, "_%s" % attr, octree_subset._reshape_vals(v))
+
+ def __iter__(self):
+ for i in range(self._ires.shape[-1]):
+ self.ind = i
+ yield i, self
+
+ def clear_data(self):
+ pass
+
+ def __getitem__(self, key):
+ return self.octree_subset[key][:,:,:,self.ind]
+
+ def get_vertex_centered_data(self, *args, **kwargs):
+ raise NotImplementedError
+
+ @property
+ def id(self):
+ return np.random.randint(1)
+
+ @property
+ def Level(self):
+ return self._ires[0,0,0,self.ind]
+
+ @property
+ def LeftEdge(self):
+ LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+ return LE
+
+ @property
+ def RightEdge(self):
+ RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+ return RE
+
+ @property
+ def dds(self):
+ return self._fwidth[0,0,0,self.ind,:]
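
_reshape_vals above turns flat per-zone arrays into (nz, nz, nz, n_oct)
Fortran-ordered blocks, with a second branch for three-component vector
fields. The reshape in isolation (nz and the oct count are made-up values):

    import numpy as np

    nz, n_oct = 2, 4
    flat = np.arange(nz**3 * n_oct, dtype="float64")

    if flat.size == nz**3 * n_oct:
        arr = flat.reshape((nz, nz, nz, n_oct), order="F")
    elif flat.size == nz**3 * n_oct * 3:
        # Vector fields carry a trailing component axis.
        arr = flat.reshape((nz, nz, nz, n_oct, 3), order="F")
    else:
        raise RuntimeError("unexpected array size")

    arr = np.asfortranarray(arr)
    print(arr[:, :, :, 0].ravel(order="F"))   # zones of the first oct: 0..7
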
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -41,6 +41,32 @@
mass_sun_cgs, \
mh
+def _field_concat(fname):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
+ v.append(data[ptype, fname].copy())
+ rv = np.concatenate(v, axis=0)
+ return rv
+ return _AllFields
+
+def _field_concat_slice(fname, axi):
+ def _AllFields(field, data):
+ v = []
+ for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
+ if ptype == "all" or \
+ ptype in data.pf.known_filters:
+ continue
+ v.append(data[ptype, fname][:,axi])
+ rv = np.concatenate(v, axis=0)
+ return rv
+ return _AllFields
+
def particle_deposition_functions(ptype, coord_name, mass_name, registry):
orig = set(registry.keys())
def particle_count(field, data):
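
_field_concat is a closure factory: each call closes over a field name and
returns a function that concatenates that field across all particle types. A
toy version with plain dicts standing in for yt data objects:

    import numpy as np

    def field_concat(fname):
        def all_fields(data):
            # Concatenate this field across every particle type.
            return np.concatenate([data[ptype][fname]
                                   for ptype in sorted(data)], axis=0)
        return all_fields

    data = {"io":   {"mass": np.array([1.0, 2.0])},
            "star": {"mass": np.array([3.0])}}
    all_mass = field_concat("mass")
    print(all_mass(data))   # [ 1.  2.  3.]
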
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -183,6 +183,8 @@
# Get our bins
if log_space:
+ if lower_bound <= 0.0 or upper_bound <= 0.0:
+ raise YTIllDefinedBounds(lower_bound, upper_bound)
func = np.logspace
lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
else:
@@ -522,7 +524,10 @@
return [self.x_bin_field, self.y_bin_field]
def fix_bounds(upper, lower, logit):
- if logit: return np.log10(upper), np.log10(lower)
+ if logit:
+ if lower <= 0.0 or upper <= 0.0:
+ raise YTIllDefinedBounds(lower, upper)
+ return np.log10(upper), np.log10(lower)
return upper, lower
class BinnedProfile2DInlineCut(BinnedProfile2D):
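
Both profiles.py hunks guard the logarithmic binning path against
non-positive bounds, raising YTIllDefinedBounds before np.log10 can produce
NaNs or -inf. The same check in isolation (ValueError stands in for yt's
exception class):

    import numpy as np

    def log_bins(lower_bound, upper_bound, n_bins):
        if lower_bound <= 0.0 or upper_bound <= 0.0:
            # yt raises YTIllDefinedBounds at this point.
            raise ValueError("log-spaced bins need positive bounds: "
                             "(%g, %g)" % (lower_bound, upper_bound))
        return np.logspace(np.log10(lower_bound), np.log10(upper_bound),
                           n_bins + 1)

    print(log_bins(1e-3, 1e3, 6))   # seven edges, one per decade
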
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -248,33 +248,6 @@
self._set_center(center)
self.coord = coord
- def reslice(self, coord):
- """
- Change the entire dataset, clearing out the current data and slicing at
- a new location. Not terribly useful except for in-place plot changes.
- """
- mylog.debug("Setting coordinate to %0.5e" % coord)
- self.coord = coord
- self.field_data.clear()
-
- def shift(self, val):
- """
- Moves the slice coordinate up by either a floating point value, or an
- integer number of indices of the finest grid.
- """
- if isinstance(val, types.FloatType):
- # We add the dx
- self.coord += val
- elif isinstance(val, types.IntType):
- # Here we assume that the grid is the max level
- level = self.hierarchy.max_level
- self.coord
- dx = self.hierarchy.select_grids(level)[0].dds[self.axis]
- self.coord += dx * val
- else:
- raise ValueError(val)
- self.field_data.clear()
-
def _generate_container_field(self, field):
if self._current_chunk is None:
self.hierarchy._identify_base_chunk(self)
@@ -375,7 +348,6 @@
self._d = -1.0 * np.dot(self._norm_vec, self.center)
self._x_vec = self.orienter.unit_vectors[0]
self._y_vec = self.orienter.unit_vectors[1]
- self._d = -1.0 * np.dot(self._norm_vec, self.center)
# First we try all three, see which has the best result:
vecs = np.identity(3)
self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
@@ -672,38 +644,6 @@
raise SyntaxError("Making a fixed resolution slice with "
"particles isn't supported yet.")
- def reslice(self, normal, center, width):
-
- # Cleanup
- del self._coord
- del self._pixelmask
-
- self.center = center
- self.width = width
- self.dds = self.width / self.dims
- self.set_field_parameter('center', center)
- self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
- self._d = -1.0 * np.dot(self._norm_vec, self.center)
- # First we try all three, see which has the best result:
- vecs = np.identity(3)
- _t = np.cross(self._norm_vec, vecs).sum(axis=1)
- ax = _t.argmax()
- self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
- self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
- self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
- self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
- self.set_field_parameter('cp_x_vec',self._x_vec)
- self.set_field_parameter('cp_y_vec',self._y_vec)
- self.set_field_parameter('cp_z_vec',self._norm_vec)
- # Calculate coordinates of each pixel
- _co = self.dds * \
- (np.mgrid[-self.dims/2 : self.dims/2,
- -self.dims/2 : self.dims/2] + 0.5)
-
- self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
- np.outer(_co[1,:,:], self._y_vec)
- self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
def get_data(self, fields):
"""
Iterates over the list of fields and generates/reads them all.
@@ -888,7 +828,6 @@
"""
_type_name = "region"
_con_args = ('center', 'left_edge', 'right_edge')
- _dx_pad = 0.5
def __init__(self, center, left_edge, right_edge, fields = None,
pf = None, **kwargs):
YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/setup.py
--- a/yt/data_objects/setup.py
+++ b/yt/data_objects/setup.py
@@ -8,7 +8,7 @@
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('data_objects', parent_package, top_path)
+ config.add_subpackage("tests")
config.make_config_py() # installs __config__.py
- config.add_subpackage("tests")
#config.make_svn_version_py()
return config
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -86,8 +86,11 @@
if not os.path.exists(apath): raise IOError(filename)
if apath not in _cached_pfs:
obj = object.__new__(cls)
- _cached_pfs[apath] = obj
- return _cached_pfs[apath]
+ if obj._skip_cache is False:
+ _cached_pfs[apath] = obj
+ else:
+ obj = _cached_pfs[apath]
+ return obj
def __init__(self, filename, data_style=None, file_style=None):
"""
@@ -157,6 +160,10 @@
def _mrep(self):
return MinimalStaticOutput(self)
+ @property
+ def _skip_cache(self):
+ return False
+
def hub_upload(self):
self._mrep.upload()
@@ -261,6 +268,10 @@
raise YTGeometryNotSupported(self.geometry)
def add_particle_filter(self, filter):
+ # This is a dummy, which we set up to enable passthrough of "all"
+ # concatenation fields.
+ n = getattr(filter, "name", filter)
+ self.known_filters[n] = None
if isinstance(filter, types.StringTypes):
used = False
for f in filter_registry[filter]:
@@ -271,6 +282,7 @@
else:
used = self.h._setup_filtered_type(filter)
if not used:
+ self.known_filters.pop(n, None)
return False
self.known_filters[filter.name] = filter
return True
@@ -292,16 +304,23 @@
if fname == self._last_freq[1]:
return self._last_finfo
if fname in self.field_info:
+ # Sometimes, if guessing_type == True, this will be switched for
+ # the type of field it is. So we look at the field type and
+ # determine if we need to change the type.
+ fi = self._last_finfo = self.field_info[fname]
+ if fi.particle_type and self._last_freq[0] \
+ not in self.particle_types:
+ field = "all", field[1]
+ elif not fi.particle_type and self._last_freq[0] \
+ not in self.fluid_types:
+ field = self.default_fluid_type, field[1]
self._last_freq = field
- self._last_finfo = self.field_info[fname]
return self._last_finfo
# We also should check "all" for particles, which can show up if you're
# mixing deposition/gas fields with particle fields.
if guessing_type and ("all", fname) in self.field_info:
self._last_freq = ("all", fname)
self._last_finfo = self.field_info["all", fname]
- mylog.debug("Guessing field %s is (%s, %s)", fname,
- "all", fname)
return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
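The __new__ hunk above routes parameter-file construction through a per-path cache unless the instance opts out via the new _skip_cache property. A minimal standalone sketch of that keyed-instance pattern (Dataset and _cache are illustrative names for this sketch, not yt's actual classes):

    import os

    _cache = {}

    class Dataset(object):
        # Subclasses that cannot safely be reused (e.g. in-memory data)
        # override this to return True and bypass the cache.
        @property
        def _skip_cache(self):
            return False

        def __new__(cls, filename, *args, **kwargs):
            apath = os.path.abspath(filename)
            if apath not in _cache:
                obj = object.__new__(cls)
                if obj._skip_cache is False:
                    _cache[apath] = obj
            else:
                obj = _cache[apath]
            return obj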
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/tests/test_ellipsoid.py
--- a/yt/data_objects/tests/test_ellipsoid.py
+++ b/yt/data_objects/tests/test_ellipsoid.py
@@ -5,13 +5,23 @@
ytcfg["yt","loglevel"] = "50"
ytcfg["yt","__withintesting"] = "True"
+def _difference(x1, x2, dw):
+ rel = x1 - x2
+ rel[rel > dw/2.0] -= dw
+ rel[rel < -dw/2.0] += dw
+ return rel
+
def test_ellipsoid():
# We decompose in different ways
- cs = [np.array([0.5, 0.5, 0.5]),
+ cs = [
+ np.array([0.5, 0.5, 0.5]),
np.array([0.1, 0.2, 0.3]),
- np.array([0.8, 0.8, 0.8])]
+ np.array([0.8, 0.8, 0.8])
+ ]
+ np.random.seed(int(0x4d3d3d3))
for nprocs in [1, 2, 4, 8]:
pf = fake_random_pf(64, nprocs = nprocs)
+ DW = pf.domain_right_edge - pf.domain_left_edge
min_dx = 2.0/pf.domain_dimensions
ABC = np.random.random((3, 12)) * 0.1
e0s = np.random.random((3, 12))
@@ -26,10 +36,17 @@
e0 = e0s[:,i]
tilt = tilts[i]
ell = pf.h.ellipsoid(c, A, B, C, e0, tilt)
- yield assert_equal, np.all(ell["Radius"] <= A), True
+ yield assert_array_less, ell["Radius"], A
p = np.array([ell[ax] for ax in 'xyz'])
- v = np.zeros_like(ell["Radius"])
- v += (((p - c[:,None]) * ell._e0[:,None]).sum(axis=0) / ell._A)**2
- v += (((p - c[:,None]) * ell._e1[:,None]).sum(axis=0) / ell._B)**2
- v += (((p - c[:,None]) * ell._e2[:,None]).sum(axis=0) / ell._C)**2
- yield assert_equal, np.all(np.sqrt(v) <= 1.0), True
+ dot_evec = [np.zeros_like(ell["Radius"]) for i in range(3)]
+ vecs = [ell._e0, ell._e1, ell._e2]
+ mags = [ell._A, ell._B, ell._C]
+ my_c = np.array([c]*p.shape[1]).transpose()
+ for ax_i in range(3):
+ dist = _difference(p[ax_i,:], my_c[ax_i,:], DW[ax_i])
+ for ax_j in range(3):
+ dot_evec[ax_j] += dist * vecs[ax_j][ax_i]
+ dist = 0
+ for ax_i in range(3):
+ dist += dot_evec[ax_i]**2.0 / mags[ax_i]**2.0
+ yield assert_array_less, dist, 1.0
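The _difference helper introduced above is the minimum-image convention: separations are wrapped into [-dw/2, dw/2) so that distances across a periodic boundary are measured the short way around. A self-contained version with a quick check:

    import numpy as np

    def periodic_difference(x1, x2, dw):
        # Wrap separations so points straddling a periodic boundary
        # of width dw are measured the short way around.
        rel = x1 - x2
        rel[rel > dw/2.0] -= dw
        rel[rel < -dw/2.0] += dw
        return rel

    # Two points near opposite edges of a unit domain are actually close:
    a = np.array([0.95])
    b = np.array([0.05])
    print(periodic_difference(a, b, 1.0))  # -> [-0.1], not [0.9]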
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ b/yt/data_objects/tests/test_fields.py
@@ -24,6 +24,7 @@
_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
def realistic_pf(fields, nprocs):
+ np.random.seed(int(0x4d3d3d3))
pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
pf.parameters["HydroMethod"] = "streaming"
pf.parameters["Gamma"] = 5.0/3.0
@@ -52,7 +53,7 @@
def __call__(self):
field = FieldInfo[self.field_name]
deps = field.get_dependencies()
- fields = deps.requested + _base_fields
+ fields = list(set(deps.requested + _base_fields))
skip_grids = False
needs_spatial = False
for v in field.validators:
@@ -100,3 +101,8 @@
for nproc in [1, 4, 8]:
test_all_fields.__name__ = "%s_%s" % (field, nproc)
yield TestFieldAccess(field, nproc)
+
+if __name__ == "__main__":
+ setup()
+ for t in test_all_fields():
+ t()
diff -r f024c47e76705ac4b3382a4ac8eea7548e1beb05 -r 3ac193628a44b13a2926af97989ca03352681b52 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -766,6 +766,8 @@
rdw = radius.copy()
for i, ax in enumerate('xyz'):
np.subtract(data["%s%s" % (field_prefix, ax)], center[i], r)
+ if data.pf.dimensionality < i+1:
+ break
if data.pf.periodicity[i] == True:
np.abs(r, r)
np.subtract(r, DW[i], rdw)
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt-3.0/commits/7eab13f45a1b/
Changeset: 7eab13f45a1b
Branch: yt-3.0
User: juxtaposicion
Date: 2013-08-22 08:23:49
Summary: merge
Affected #: 26 files
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 .hgtags
--- a/.hgtags
+++ b/.hgtags
@@ -5158,3 +5158,4 @@
0000000000000000000000000000000000000000 hop callback
a71dffe4bc813fdadc506ccad9efb632e23dc843 yt-3.0a1
954d1ffcbf04c3d1b394c2ea05324d903a9a07cf yt-3.0a2
+f4853999c2b5b852006d6628719c882cddf966df yt-3.0a3
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -473,11 +473,18 @@
function do_setup_py
{
[ -e $1/done ] && return
- echo "Installing $1 (arguments: '$*')"
- [ ! -e $1/extracted ] && tar xfz $1.tar.gz
- touch $1/extracted
- cd $1
- if [ ! -z `echo $1 | grep h5py` ]
+ LIB=$1
+ shift
+ if [ -z "$@" ]
+ then
+ echo "Installing $LIB"
+ else
+ echo "Installing $LIB (arguments: '$@')"
+ fi
+ [ ! -e $LIB/extracted ] && tar xfz $LIB.tar.gz
+ touch $LIB/extracted
+ cd $LIB
+ if [ ! -z `echo $LIB | grep h5py` ]
then
shift
( ${DEST_DIR}/bin/python2.7 setup.py build --hdf5=${HDF5_DIR} $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -519,8 +526,8 @@
function get_ytproject
{
+ [ -e $1 ] && return
echo "Downloading $1 from yt-project.org"
- [ -e $1 ] && return
${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
}
@@ -551,67 +558,93 @@
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
+CYTHON='Cython-0.19.1'
+FORTHON='Forthon-0.8.11'
+PYX='PyX-0.12.1'
+PYTHON='Python-2.7.5'
+BZLIB='bzip2-1.0.6'
+FREETYPE_VER='freetype-2.4.12'
+H5PY='h5py-2.1.3'
+HDF5='hdf5-1.8.11'
+IPYTHON='ipython-1.0.0'
+LAPACK='lapack-3.4.2'
+PNG=libpng-1.6.3
+MATPLOTLIB='matplotlib-1.3.0'
+MERCURIAL='mercurial-2.7'
+NOSE='nose-1.3.0'
+NUMPY='numpy-1.7.1'
+PYTHON_HGLIB='python-hglib-1.0'
+PYZMQ='pyzmq-13.1.0'
+ROCKSTAR='rockstar-0.99.6'
+SCIPY='scipy-0.12.0'
+SQLITE='sqlite-autoconf-3071700'
+SYMPY='sympy-0.7.3'
+TORNADO='tornado-3.1'
+ZEROMQ='zeromq-3.2.3'
+ZLIB='zlib-1.2.8'
+
# Now we dump all our SHA512 files out.
-echo 'fb85d71bb4f80b35f0d0f1735c650dd75c5f84b05635ddf91d6241ff103b5a49158c5b851a20c15e05425f6dde32a4971b35fcbd7445f61865b4d61ffd1fbfa1 Cython-0.18.tar.gz' > Cython-0.18.tar.gz.sha512
+echo '9dcdda5b2ee2e63c2d3755245b7b4ed2f4592455f40feb6f8e86503195d9474559094ed27e789ab1c086d09da0bb21c4fe844af0e32a7d47c81ff59979b18ca0 Cython-0.19.1.tar.gz' > Cython-0.19.1.tar.gz.sha512
+echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
echo '4941f5aa21aff3743546495fb073c10d2657ff42b2aff401903498638093d0e31e344cce778980f28a7170c6d29eab72ac074277b9d4088376e8692dc71e55c1 PyX-0.12.1.tar.gz' > PyX-0.12.1.tar.gz.sha512
-echo '3349152c47ed2b63c5c9aabcfa92b8497ea9d71ca551fd721e827fcb8f91ff9fbbee6bba8f8cb2dea185701b8798878b4b2435c1496b63d4b4a37c624a625299 Python-2.7.4.tgz' > Python-2.7.4.tgz.sha512
+echo 'd6580eb170b36ad50f3a30023fe6ca60234156af91ccb3971b0b0983119b86f3a9f6c717a515c3c6cb72b3dcbf1d02695c6d0b92745f460b46a3defd3ff6ef2f Python-2.7.5.tgz' > Python-2.7.5.tgz.sha512
+echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
+echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
echo '00ace5438cfa0c577e5f578d8a808613187eff5217c35164ffe044fbafdfec9e98f4192c02a7d67e01e5a5ccced630583ad1003c37697219b0f147343a3fdd12 bzip2-1.0.6.tar.gz' > bzip2-1.0.6.tar.gz.sha512
echo 'a296dfcaef7e853e58eed4e24b37c4fa29cfc6ac688def048480f4bb384b9e37ca447faf96eec7b378fd764ba291713f03ac464581d62275e28eb2ec99110ab6 reason-js-20120623.zip' > reason-js-20120623.zip.sha512
-echo 'b46c93d76f8ce09c94765b20b2eeadf71207671f1131777de178b3727c235b4dd77f6e60d62442b96648c3c6749e9e4c1194c1b02af7e946576be09e1ff7ada3 freetype-2.4.11.tar.gz' > freetype-2.4.11.tar.gz.sha512
-echo '15ca0209e8d8f172cb0708a2de946fbbde8551d9bebc4a95fa7ae31558457a7f43249d5289d7675490c577deb4e0153698fd2407644078bf30bd5ab10135fce3 h5py-2.1.2.tar.gz' > h5py-2.1.2.tar.gz.sha512
-echo 'c68a425bacaa7441037910b9166f25b89e1387776a7749a5350793f89b1690350df5f018060c31d03686e7c3ed2aa848bd2b945c96350dc3b6322e087934783a hdf5-1.8.9.tar.gz' > hdf5-1.8.9.tar.gz.sha512
-echo 'b2b53ed358bacab9e8d63a51f17bd5f121ece60a1d7c53e8a8eb08ad8b1e4393a8d7a86eec06e2efc62348114f0d84c0a3dfc805e68e6edd93b20401962b3554 libpng-1.6.1.tar.gz' > libpng-1.6.1.tar.gz.sha512
-echo '497f91725eaf361bdb9bdf38db2bff5068a77038f1536df193db64c9b887e3b0d967486daee722eda6e2c4e60f034eee030673e53d07bf0db0f3f7c0ef3bd208 matplotlib-1.2.1.tar.gz' > matplotlib-1.2.1.tar.gz.sha512
-echo '928fdeaaf0eaec80adbd8765521de9666ab56aaa2101fb9ab2cb392d8b29475d3b052d89652ff9b67522cfcc6cd958717ac715f51b0573ee088e9a595f29afe2 mercurial-2.5.4.tar.gz' > mercurial-2.5.4.tar.gz.sha512
-echo 'a485daa556f6c76003de1dbb3e42b3daeee0a320c69c81b31a7d2ebbc2cf8ab8e96c214a4758e5e7bf814295dc1d6aa563092b714db7e719678d8462135861a8 numpy-1.7.0.tar.gz' > numpy-1.7.0.tar.gz.sha512
-echo '293d78d14a9347cb83e1a644e5f3e4447ed6fc21642c51683e5495dda08d2312194a73d1fc3c1d78287e33ed065aa251ecbaa7c0ea9189456c1702e96d78becd sqlite-autoconf-3071601.tar.gz' > sqlite-autoconf-3071601.tar.gz.sha512
-echo 'b1c073ad26684e354f7c522c14655840592e03872bc0a94690f89cae2ff88f146fce1dad252ff27a889dac4a32ff9f8ab63ba940671f9da89e9ba3e19f1bf58d zlib-1.2.7.tar.gz' > zlib-1.2.7.tar.gz.sha512
-echo '05ac335727a2c3036f31a2506fdd2615aa436bfbe2f81799fe6c51bffe2591ad6a8427f3b25c34e7e709fb4e7607a0589dc7a22185c1f9b894e90de6711a88aa ipython-0.13.1.tar.gz' > ipython-0.13.1.tar.gz.sha512
-echo 'b9d061ca49e54ea917e0aed2b2a48faef33061dbf6d17eae7f8c3fff0b35ca883e7324f6cb24bda542443f669dcd5748037a5f2309f4c359d68adef520894865 zeromq-3.2.2.tar.gz' > zeromq-3.2.2.tar.gz.sha512
-echo '852fce8a8308c4e1e4b19c77add2b2055ca2ba570b28e8364888df490af92b860c72e860adfb075b3405a9ceb62f343889f20a8711c9353a7d9059adee910f83 pyzmq-13.0.2.tar.gz' > pyzmq-13.0.2.tar.gz.sha512
-echo '303bd3fbea22be57fddf7df78ddf5a783d355a0c8071b1363250daafc20232ddd28eedc44aa1194f4a7afd82f9396628c5bb06819e02b065b6a1b1ae8a7c19e1 tornado-3.0.tar.gz' > tornado-3.0.tar.gz.sha512
-echo '3f53d0b474bfd79fea2536d0a9197eaef6c0927e95f2f9fd52dbd6c1d46409d0e649c21ac418d8f7767a9f10fe6114b516e06f2be4b06aec3ab5bdebc8768220 Forthon-0.8.11.tar.gz' > Forthon-0.8.11.tar.gz.sha512
-echo 'c13116c1f0547000cc565e15774687b9e884f8b74fb62a84e578408a868a84961704839065ae4f21b662e87f2aaedf6ea424ea58dfa9d3d73c06281f806d15dd nose-1.2.1.tar.gz' > nose-1.2.1.tar.gz.sha512
-echo 'd67de9567256e6f1649e4f3f7dfee63371d5f00fd3fd4f92426198f862e97c57f70e827d19f4e5e1929ad85ef2ce7aa5a0596b101cafdac71672e97dc115b397 python-hglib-0.3.tar.gz' > python-hglib-0.3.tar.gz.sha512
-echo 'ffc602eb346717286b3d0a6770c60b03b578b3cf70ebd12f9e8b1c8c39cdb12ef219ddaa041d7929351a6b02dbb8caf1821b5452d95aae95034cbf4bc9904a7a sympy-0.7.2.tar.gz' > sympy-0.7.2.tar.gz.sha512
+echo '609a68a3675087e0cc95268574f31e104549daa48efe15a25a33b8e269a93b4bd160f4c3e8178dca9c950ef5ca514b039d6fd1b45db6af57f25342464d0429ce freetype-2.4.12.tar.gz' > freetype-2.4.12.tar.gz.sha512
+echo '2eb7030f8559ff5cb06333223d98fda5b3a663b6f4a026949d1c423aa9a869d824e612ed5e1851f3bf830d645eea1a768414f73731c23ab4d406da26014fe202 h5py-2.1.3.tar.gz' > h5py-2.1.3.tar.gz.sha512
+echo 'e9db26baa297c8ed10f1ca4a3fcb12d6985c6542e34c18d48b2022db73014f054c8b8434f3df70dcf44631f38b016e8050701d52744953d0fced3272d7b6b3c1 hdf5-1.8.11.tar.gz' > hdf5-1.8.11.tar.gz.sha512
+echo '1b309c08009583e66d1725a2d2051e6de934db246129568fa6d5ba33ad6babd3b443e7c2782d817128d2b112e21bcdd71e66be34fbd528badd900f1d0ed3db56 ipython-1.0.0.tar.gz' > ipython-1.0.0.tar.gz.sha512
+echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '887582e5a22e4cde338aa8fec7a89f6dd31f2f02b8842735f00f970f64582333fa03401cea6d01704083403c7e8b7ebc26655468ce930165673b33efa4bcd586 libpng-1.6.3.tar.gz' > libpng-1.6.3.tar.gz.sha512
+echo '990e3a155ca7a9d329c41a43b44a9625f717205e81157c668a8f3f2ad5459ed3fed8c9bd85e7f81c509e0628d2192a262d4aa30c8bfc348bb67ed60a0362505a matplotlib-1.3.0.tar.gz' > matplotlib-1.3.0.tar.gz.sha512
+echo 'e425778edb0f71c34e719e04561ee3de37feaa1be4d60b94c780aebdbe6d41f8f4ab15103a8bbe8894ebeb228c42f0e2cd41b8db840f8384e1cd7cd2d5b67b97 mercurial-2.7.tar.gz' > mercurial-2.7.tar.gz.sha512
+echo 'a3b8060e415560a868599224449a3af636d24a060f1381990b175dcd12f30249edd181179d23aea06b0c755ff3dc821b7a15ed8840f7855530479587d4d814f4 nose-1.3.0.tar.gz' > nose-1.3.0.tar.gz.sha512
+echo 'd58177f3971b6d07baf6f81a2088ba371c7e43ea64ee7ada261da97c6d725b4bd4927122ac373c55383254e4e31691939276dab08a79a238bfa55172a3eff684 numpy-1.7.1.tar.gz' > numpy-1.7.1.tar.gz.sha512
+echo '9c0a61299779aff613131aaabbc255c8648f0fa7ab1806af53f19fbdcece0c8a68ddca7880d25b926d67ff1b9201954b207919fb09f6a290acb078e8bbed7b68 python-hglib-1.0.tar.gz' > python-hglib-1.0.tar.gz.sha512
+echo 'c65013293dd4049af5db009fdf7b6890a3c6b1e12dd588b58fb5f5a5fef7286935851fb7a530e03ea16f28de48b964e50f48bbf87d34545fd23b80dd4380476b pyzmq-13.1.0.tar.gz' > pyzmq-13.1.0.tar.gz.sha512
echo '172f2bc671145ebb0add2669c117863db35851fb3bdb192006cd710d4d038e0037497eb39a6d01091cb923f71a7e8982a77b6e80bf71d6275d5d83a363c8d7e5 rockstar-0.99.6.tar.gz' > rockstar-0.99.6.tar.gz.sha512
-echo 'd4fdd62f2db5285cd133649bd1bfa5175cb9da8304323abd74e0ef1207d55e6152f0f944da1da75f73e9dafb0f3bb14efba3c0526c732c348a653e0bd223ccfa scipy-0.11.0.tar.gz' > scipy-0.11.0.tar.gz.sha512
-echo '276bd9c061ec9a27d478b33078a86f93164ee2da72210e12e2c9da71dcffeb64767e4460b93f257302b09328eda8655e93c4b9ae85e74472869afbeae35ca71e blas.tar.gz' > blas.tar.gz.sha512
-echo '8770214491e31f0a7a3efaade90eee7b0eb20a8a6ab635c5f854d78263f59a1849133c14ef5123d01023f0110cbb9fc6f818da053c01277914ae81473430a952 lapack-3.4.2.tar.gz' > lapack-3.4.2.tar.gz.sha512
+echo '80c8e137c3ccba86575d4263e144ba2c4684b94b5cd620e200f094c92d4e118ea6a631d27bdb259b0869771dfaeeae68c0fdd37fdd740b9027ee185026e921d4 scipy-0.12.0.tar.gz' > scipy-0.12.0.tar.gz.sha512
+echo '96f3e51b46741450bc6b63779c10ebb4a7066860fe544385d64d1eda52592e376a589ef282ace2e1df73df61c10eab1a0d793abbdaf770e60289494d4bf3bcb4 sqlite-autoconf-3071700.tar.gz' > sqlite-autoconf-3071700.tar.gz.sha512
+echo '2992baa3edfb4e1842fb642abf0bf0fc0bf56fc183aab8fed6b3c42fbea928fa110ede7fdddea2d63fc5953e8d304b04da433dc811134fadefb1eecc326121b8 sympy-0.7.3.tar.gz' > sympy-0.7.3.tar.gz.sha512
+echo '101544db6c97beeadc5a02b2ef79edefa0a07e129840ace2e4aa451f3976002a273606bcdc12d6cef5c22ff4c1c9dcf60abccfdee4cbef8e3f957cd25c0430cf tornado-3.1.tar.gz' > tornado-3.1.tar.gz.sha512
+echo '34ffb6aa645f62bd1158a8f2888bf92929ccf90917a6c50ed51ed1240732f498522e164d1536f26480c87ad5457fe614a93bf0e15f2f89b0b168e64a30de68ca zeromq-3.2.3.tar.gz' > zeromq-3.2.3.tar.gz.sha512
+echo 'ece209d4c7ec0cb58ede791444dc754e0d10811cbbdebe3df61c0fd9f9f9867c1c3ccd5f1827f847c005e24eef34fb5bf87b5d3f894d75da04f1797538290e4a zlib-1.2.8.tar.gz' > zlib-1.2.8.tar.gz.sha512
# Individual processes
-[ -z "$HDF5_DIR" ] && get_ytproject hdf5-1.8.9.tar.gz
-[ $INST_ZLIB -eq 1 ] && get_ytproject zlib-1.2.7.tar.gz
-[ $INST_BZLIB -eq 1 ] && get_ytproject bzip2-1.0.6.tar.gz
-[ $INST_PNG -eq 1 ] && get_ytproject libpng-1.6.1.tar.gz
-[ $INST_FTYPE -eq 1 ] && get_ytproject freetype-2.4.11.tar.gz
-[ $INST_SQLITE3 -eq 1 ] && get_ytproject sqlite-autoconf-3071601.tar.gz
-[ $INST_PYX -eq 1 ] && get_ytproject PyX-0.12.1.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject zeromq-3.2.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject pyzmq-13.0.2.tar.gz
-[ $INST_0MQ -eq 1 ] && get_ytproject tornado-3.0.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject scipy-0.11.0.tar.gz
+[ -z "$HDF5_DIR" ] && get_ytproject $HDF5.tar.gz
+[ $INST_ZLIB -eq 1 ] && get_ytproject $ZLIB.tar.gz
+[ $INST_BZLIB -eq 1 ] && get_ytproject $BZLIB.tar.gz
+[ $INST_PNG -eq 1 ] && get_ytproject $PNG.tar.gz
+[ $INST_FTYPE -eq 1 ] && get_ytproject $FREETYPE_VER.tar.gz
+[ $INST_SQLITE3 -eq 1 ] && get_ytproject $SQLITE.tar.gz
+[ $INST_PYX -eq 1 ] && get_ytproject $PYX.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $ZEROMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $PYZMQ.tar.gz
+[ $INST_0MQ -eq 1 ] && get_ytproject $TORNADO.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $SCIPY.tar.gz
[ $INST_SCIPY -eq 1 ] && get_ytproject blas.tar.gz
-[ $INST_SCIPY -eq 1 ] && get_ytproject lapack-3.4.2.tar.gz
-get_ytproject Python-2.7.4.tgz
-get_ytproject numpy-1.7.0.tar.gz
-get_ytproject matplotlib-1.2.1.tar.gz
-get_ytproject mercurial-2.5.4.tar.gz
-get_ytproject ipython-0.13.1.tar.gz
-get_ytproject h5py-2.1.2.tar.gz
-get_ytproject Cython-0.18.tar.gz
+[ $INST_SCIPY -eq 1 ] && get_ytproject $LAPACK.tar.gz
+get_ytproject $PYTHON.tgz
+get_ytproject $NUMPY.tar.gz
+get_ytproject $MATPLOTLIB.tar.gz
+get_ytproject $MERCURIAL.tar.gz
+get_ytproject $IPYTHON.tar.gz
+get_ytproject $H5PY.tar.gz
+get_ytproject $CYTHON.tar.gz
get_ytproject reason-js-20120623.zip
-get_ytproject Forthon-0.8.11.tar.gz
-get_ytproject nose-1.2.1.tar.gz
-get_ytproject python-hglib-0.3.tar.gz
-get_ytproject sympy-0.7.2.tar.gz
-get_ytproject rockstar-0.99.6.tar.gz
+get_ytproject $FORTHON.tar.gz
+get_ytproject $NOSE.tar.gz
+get_ytproject $PYTHON_HGLIB.tar.gz
+get_ytproject $SYMPY.tar.gz
+get_ytproject $ROCKSTAR.tar.gz
if [ $INST_BZLIB -eq 1 ]
then
- if [ ! -e bzip2-1.0.6/done ]
+ if [ ! -e $BZLIB/done ]
then
- [ ! -e bzip2-1.0.6 ] && tar xfz bzip2-1.0.6.tar.gz
+ [ ! -e $BZLIB ] && tar xfz $BZLIB.tar.gz
echo "Installing BZLIB"
- cd bzip2-1.0.6
+ cd $BZLIB
if [ `uname` = "Darwin" ]
then
if [ -z "${CC}" ]
@@ -634,11 +667,11 @@
if [ $INST_ZLIB -eq 1 ]
then
- if [ ! -e zlib-1.2.7/done ]
+ if [ ! -e $ZLIB/done ]
then
- [ ! -e zlib-1.2.7 ] && tar xfz zlib-1.2.7.tar.gz
+ [ ! -e $ZLIB ] && tar xfz $ZLIB.tar.gz
echo "Installing ZLIB"
- cd zlib-1.2.7
+ cd $ZLIB
( ./configure --shared --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -652,11 +685,11 @@
if [ $INST_PNG -eq 1 ]
then
- if [ ! -e libpng-1.6.1/done ]
+ if [ ! -e $PNG/done ]
then
- [ ! -e libpng-1.6.1 ] && tar xfz libpng-1.6.1.tar.gz
+ [ ! -e $PNG ] && tar xfz $PNG.tar.gz
echo "Installing PNG"
- cd libpng-1.6.1
+ cd $PNG
( ./configure CPPFLAGS=-I${DEST_DIR}/include CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -670,13 +703,14 @@
if [ $INST_FTYPE -eq 1 ]
then
- if [ ! -e freetype-2.4.11/done ]
+ if [ ! -e $FREETYPE_VER/done ]
then
- [ ! -e freetype-2.4.11 ] && tar xfz freetype-2.4.11.tar.gz
+ [ ! -e $FREETYPE_VER ] && tar xfz $FREETYPE_VER.tar.gz
echo "Installing FreeType2"
- cd freetype-2.4.11
+ cd $FREETYPE_VER
( ./configure CFLAGS=-I${DEST_DIR}/include --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
- ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make 2>&1 ) 1>> ${LOG_FILE} || do_exit
+ ( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
@@ -688,11 +722,11 @@
if [ -z "$HDF5_DIR" ]
then
- if [ ! -e hdf5-1.8.9/done ]
+ if [ ! -e $HDF5/done ]
then
- [ ! -e hdf5-1.8.9 ] && tar xfz hdf5-1.8.9.tar.gz
+ [ ! -e $HDF5 ] && tar xfz $HDF5.tar.gz
echo "Installing HDF5"
- cd hdf5-1.8.9
+ cd $HDF5
( ./configure --prefix=${DEST_DIR}/ --enable-shared 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -707,11 +741,11 @@
if [ $INST_SQLITE3 -eq 1 ]
then
- if [ ! -e sqlite-autoconf-3071601/done ]
+ if [ ! -e $SQLITE/done ]
then
- [ ! -e sqlite-autoconf-3071601 ] && tar xfz sqlite-autoconf-3071601.tar.gz
+ [ ! -e $SQLITE ] && tar xfz $SQLITE.tar.gz
echo "Installing SQLite3"
- cd sqlite-autoconf-3071601
+ cd $SQLITE
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
@@ -720,11 +754,11 @@
fi
fi
-if [ ! -e Python-2.7.4/done ]
+if [ ! -e $PYTHON/done ]
then
echo "Installing Python. This may take a while, but don't worry. yt loves you."
- [ ! -e Python-2.7.4 ] && tar xfz Python-2.7.4.tgz
- cd Python-2.7.4
+ [ ! -e $PYTHON ] && tar xfz $PYTHON.tgz
+ cd $PYTHON
( ./configure --prefix=${DEST_DIR}/ ${PYCONF_ARGS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make ${MAKE_PROCS} 2>&1 ) 1>> ${LOG_FILE} || do_exit
@@ -739,7 +773,7 @@
if [ $INST_HG -eq 1 ]
then
- do_setup_py mercurial-2.5.4
+ do_setup_py $MERCURIAL
export HG_EXEC=${DEST_DIR}/bin/hg
else
# We assume that hg can be found in the path.
@@ -788,9 +822,9 @@
if [ $INST_SCIPY -eq 0 ]
then
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
+ do_setup_py $NUMPY ${NUMPY_ARGS}
else
- if [ ! -e scipy-0.11.0/done ]
+ if [ ! -e $SCIPY/done ]
then
if [ ! -e BLAS/done ]
then
@@ -798,17 +832,17 @@
echo "Building BLAS"
cd BLAS
gfortran -O2 -fPIC -fno-second-underscore -c *.f
- ar r libfblas.a *.o 1>> ${LOG_FILE}
+ ar r libfblas.a *.o &>> ${LOG_FILE}
ranlib libfblas.a 1>> ${LOG_FILE}
rm -rf *.o
touch done
cd ..
fi
- if [ ! -e lapack-3.4.2/done ]
+ if [ ! -e $LAPACK/done ]
then
- tar xfz lapack-3.4.2.tar.gz
+ tar xfz $LAPACK.tar.gz
echo "Building LAPACK"
- cd lapack-3.4.2/
+ cd $LAPACK/
cp INSTALL/make.inc.gfortran make.inc
make lapacklib OPTS="-fPIC -O2" NOOPT="-fPIC -O0" CFLAGS=-fPIC LDFLAGS=-fPIC 1>> ${LOG_FILE} || do_exit
touch done
@@ -816,9 +850,9 @@
fi
fi
export BLAS=$PWD/BLAS/libfblas.a
- export LAPACK=$PWD/lapack-3.4.2/liblapack.a
- do_setup_py numpy-1.7.0 ${NUMPY_ARGS}
- do_setup_py scipy-0.11.0 ${NUMPY_ARGS}
+ export LAPACK=$PWD/$LAPACK/liblapack.a
+ do_setup_py $NUMPY ${NUMPY_ARGS}
+ do_setup_py $SCIPY ${NUMPY_ARGS}
fi
if [ -n "${MPL_SUPP_LDFLAGS}" ]
@@ -840,10 +874,10 @@
echo "Setting CFLAGS ${CFLAGS}"
fi
# Now we set up the basedir for matplotlib:
-mkdir -p ${DEST_DIR}/src/matplotlib-1.2.1
-echo "[directories]" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/matplotlib-1.2.1/setup.cfg
-do_setup_py matplotlib-1.2.1
+mkdir -p ${DEST_DIR}/src/$MATPLOTLIB
+echo "[directories]" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+echo "basedirlist = ${DEST_DIR}" >> ${DEST_DIR}/src/$MATPLOTLIB/setup.cfg
+do_setup_py $MATPLOTLIB
if [ -n "${OLD_LDFLAGS}" ]
then
export LDFLAG=${OLD_LDFLAGS}
@@ -855,36 +889,36 @@
# Now we do our IPython installation, which has two optional dependencies.
if [ $INST_0MQ -eq 1 ]
then
- if [ ! -e zeromq-3.2.2/done ]
+ if [ ! -e $ZEROMQ/done ]
then
- [ ! -e zeromq-3.2.2 ] && tar xfz zeromq-3.2.2.tar.gz
+ [ ! -e $ZEROMQ ] && tar xfz $ZEROMQ.tar.gz
echo "Installing ZeroMQ"
- cd zeromq-3.2.2
+ cd $ZEROMQ
( ./configure --prefix=${DEST_DIR}/ 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make install 2>&1 ) 1>> ${LOG_FILE} || do_exit
( make clean 2>&1) 1>> ${LOG_FILE} || do_exit
touch done
cd ..
fi
- do_setup_py pyzmq-13.0.2 --zmq=${DEST_DIR}
- do_setup_py tornado-3.0
+ do_setup_py $PYZMQ --zmq=${DEST_DIR}
+ do_setup_py $TORNADO
fi
-do_setup_py ipython-0.13.1
-do_setup_py h5py-2.1.2
-do_setup_py Cython-0.18
-do_setup_py Forthon-0.8.11
-do_setup_py nose-1.2.1
-do_setup_py python-hglib-0.3
-do_setup_py sympy-0.7.2
-[ $INST_PYX -eq 1 ] && do_setup_py PyX-0.12.1
+do_setup_py $IPYTHON
+do_setup_py $H5PY
+do_setup_py $CYTHON
+do_setup_py $FORTHON
+do_setup_py $NOSE
+do_setup_py $PYTHON_HGLIB
+do_setup_py $SYMPY
+[ $INST_PYX -eq 1 ] && do_setup_py $PYX
# Now we build Rockstar and set its environment variable.
if [ $INST_ROCKSTAR -eq 1 ]
then
if [ ! -e Rockstar/done ]
then
- [ ! -e Rockstar ] && tar xfz rockstar-0.99.6.tar.gz
+ [ ! -e Rockstar ] && tar xfz $ROCKSTAR.tar.gz
echo "Building Rockstar"
cd Rockstar
( make lib 2>&1 ) 1>> ${LOG_FILE} || do_exit
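The install-script refactor above hoists every package version into a shell variable and keeps downloads honest by checking each tarball against its recorded SHA-512 (${SHASUM} -c) before use. For reference, the same verify-after-download step sketched in Python (filename and digest are whatever the caller recorded; this is not part of the script):

    import hashlib

    def verify_sha512(filename, expected_hexdigest):
        # Hash the file in 1 MiB chunks and compare against the
        # recorded digest; return True only on an exact match.
        h = hashlib.sha512()
        with open(filename, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                h.update(chunk)
        return h.hexdigest() == expected_hexdigest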
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -190,8 +190,6 @@
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
-
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -201,8 +199,12 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
+ if "grids" in dir(pf.h):
+ sources = pf.h._get_objs("grids")
+ else:
+ sources = [pf.h.all_data()]
+ for g in sources:
+ if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
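This hunk generalizes the Rockstar particle reader from grid frontends to particle/octree ones: if the hierarchy exposes "grids" it iterates per grid, otherwise it falls back to a single all_data() container, and empty sources are skipped either way. Schematically (assuming the hierarchy interface used in the diff):

    def particle_sources(hierarchy):
        # Grid frontends expose per-grid objects; particle/octree
        # frontends are handled through one all-encompassing container.
        if "grids" in dir(hierarchy):
            return hierarchy._get_objs("grids")
        return [hierarchy.all_data()]

    def iterate_nonempty(sources):
        for g in sources:
            # Grids report NumberOfParticles; containers without that
            # attribute fall through to a length check on a real field.
            if getattr(g, "NumberOfParticles", 1) == 0:
                continue
            if len(g["particle_position_x"]) == 0:
                continue
            yield g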
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -62,7 +62,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold009',
+ gold_standard_filename = 'gold010',
local_standard_filename = 'local001',
sketchfab_api_key = 'None',
thread_field_detection = 'False'
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/api.py
--- a/yt/data_objects/api.py
+++ b/yt/data_objects/api.py
@@ -89,3 +89,6 @@
from particle_trajectories import \
ParticleTrajectoryCollection
+
+from particle_filters import \
+ particle_filter
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -237,6 +237,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
@@ -400,6 +401,8 @@
ftype = self._current_particle_type
else:
ftype = self._current_fluid_type
+ if (ftype, fname) not in self.pf.field_info:
+ ftype = "gas"
if finfo.particle_type and ftype not in self.pf.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.pf.fluid_types:
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -231,33 +231,54 @@
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
-def _StarAngularMomentumVector(data):
+def _StarAngularMomentumVector(data, ftype=None):
"""
This function returns the mass-weighted average angular momentum vector
for stars.
"""
- is_star = data["creation_time"] > 0
- star_mass = data["ParticleMassMsun"][is_star]
- sLx = data["ParticleSpecificAngularMomentumX"][is_star]
- sLy = data["ParticleSpecificAngularMomentumY"][is_star]
- sLz = data["ParticleSpecificAngularMomentumZ"][is_star]
- amx = sLx * star_mass
- amy = sLy * star_mass
- amz = sLz * star_mass
+ if ftype is None:
+ is_star = data["creation_time"] > 0
+ star_mass = data["ParticleMassMsun"][is_star]
+ else:
+ is_star = Ellipsis
+ key = (ftype, "ParticleSpecificAngularMomentum%s")
+ j_mag = np.ones(3, dtype='f8')
+ for i, ax in enumerate("XYZ"):
+ j_mag[i] = data[key % ax][is_star]
+ j_mag[i] *= star_mass
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0,dtype=np.float64)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum(dtype=np.float64))
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
@@ -726,3 +747,28 @@
add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
combine_function=_combParticleDensityCenter,n_ret=2)
+
+def _HalfMass(data, field):
+ """
+ Cumulative sum the given mass field and find
+ at what radius the half mass is. Simple but
+ memory-expensive method.
+ """
+ d = np.nan_to_num(data[field])
+ r = data['Radius']
+ return d, r
+
+def _combHalfMass(data, field_vals, radii, frac=0.5):
+ fv = np.concatenate(field_vals.tolist()).ravel()
+ r = np.concatenate(radii.tolist()).ravel()
+ idx = np.argsort(r)
+ r = r[idx]
+ fv = np.cumsum(fv[idx])
+ idx, = np.where(fv / fv[-1] > frac)
+ if len(idx) > 0:
+ return r[idx[0]]
+ else:
+ return np.nan
+
+add_quantity("HalfMass",function=_HalfMass,
+ combine_function=_combHalfMass,n_ret=2)
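The new HalfMass quantity sorts radii, cumulatively sums the mass along that ordering, and reports the first radius whose enclosed fraction exceeds frac. A self-contained sketch with synthetic data:

    import numpy as np

    def half_mass_radius(mass, radius, frac=0.5):
        # Sort by radius, accumulate mass, and return the first radius
        # at which the enclosed fraction exceeds `frac`.
        idx = np.argsort(radius)
        r = radius[idx]
        enclosed = np.cumsum(mass[idx])
        hits, = np.where(enclosed / enclosed[-1] > frac)
        return r[hits[0]] if len(hits) > 0 else np.nan

    mass = np.ones(1000)
    radius = np.random.random(1000)
    print(half_mass_radius(mass, radius))  # ~0.5 for uniform-in-r points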
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -273,6 +273,8 @@
if hasattr(self.pf, "field_info"):
if not isinstance(item, tuple):
field = ("unknown", item)
+ finfo = self.pf._get_field_info(*field)
+ mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
finfo = self.pf._get_field_info(*field)
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -45,6 +45,7 @@
def _AllFields(field, data):
v = []
for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
if ptype == "all" or \
ptype in data.pf.known_filters:
continue
@@ -57,6 +58,7 @@
def _AllFields(field, data):
v = []
for ptype in data.pf.particle_types:
+ data.pf._last_freq = (ptype, None)
if ptype == "all" or \
ptype in data.pf.known_filters:
continue
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/profiles.py
--- a/yt/data_objects/profiles.py
+++ b/yt/data_objects/profiles.py
@@ -183,6 +183,8 @@
# Get our bins
if log_space:
+ if lower_bound <= 0.0 or upper_bound <= 0.0:
+ raise YTIllDefinedBounds(lower_bound, upper_bound)
func = np.logspace
lower_bound, upper_bound = np.log10(lower_bound), np.log10(upper_bound)
else:
@@ -522,7 +524,10 @@
return [self.x_bin_field, self.y_bin_field]
def fix_bounds(upper, lower, logit):
- if logit: return np.log10(upper), np.log10(lower)
+ if logit:
+ if lower <= 0.0 or upper <= 0.0:
+ raise YTIllDefinedBounds(lower, upper)
+ return np.log10(upper), np.log10(lower)
return upper, lower
class BinnedProfile2DInlineCut(BinnedProfile2D):
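Both hunks above guard the log-spaced binning paths: log10 of a non-positive bound yields nan or -inf and silently poisons the bin edges, so YTIllDefinedBounds (added in yt/utilities/exceptions.py below) is raised up front instead. The check in isolation, with a plain ValueError standing in for yt's exception:

    import numpy as np

    def log_bins(lower_bound, upper_bound, n_bins):
        # log10 of a non-positive bound is nan/-inf; fail loudly
        # instead of producing corrupt bin edges.
        if lower_bound <= 0.0 or upper_bound <= 0.0:
            raise ValueError(
                "Bounds %0.3e and %0.3e are ill-defined for log binning."
                % (lower_bound, upper_bound))
        return np.logspace(np.log10(lower_bound), np.log10(upper_bound),
                           n_bins + 1)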
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -687,6 +687,13 @@
yv = data["particle_velocity_y"] - bv[1]
zv = data["particle_velocity_z"] - bv[2]
return yv*z - zv*y
+add_field("ParticleSpecificAngularMomentumX",
+ function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumY(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -697,6 +704,13 @@
xv = data["particle_velocity_x"] - bv[0]
zv = data["particle_velocity_z"] - bv[2]
return -(xv*z - zv*x)
+add_field("ParticleSpecificAngularMomentumY",
+ function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumZ(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -707,14 +721,13 @@
xv = data["particle_velocity_x"] - bv[0]
yv = data["particle_velocity_y"] - bv[1]
return xv*y - yv*x
-for ax in 'XYZ':
- n = "ParticleSpecificAngularMomentum%s" % ax
- add_field(n, function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentum,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
- add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentumKMSMPC,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ",
+ function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleAngularMomentum(field, data):
return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
@@ -729,17 +742,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -870,6 +883,58 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return spht
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ vel = vel - np.reshape(bv, (3, 1))
+ sphp = get_sph_phi_component(pos, theta, phi, normal)
+ return sphp
+
+add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
def _ParticleRadialVelocity(field, data):
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
@@ -878,8 +943,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -898,8 +963,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
spht = get_sph_theta_component(vel, theta, phi, normal)
@@ -918,8 +983,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphp = get_sph_phi_component(vel, phi, normal)
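The new Particle*Spherical fields project particle positions and velocities onto spherical unit vectors defined by the center and normal field parameters. (As committed, _ParticlePhiSpherical subtracts bv from a vel that is never defined, and ParticlePhiSpherical is registered with the theta function; both look like slips in this changeset.) The radial component in minimal numpy form, assuming a non-periodic domain and skipping yt's get_sph_* helpers:

    import numpy as np

    def radial_velocity(pos, vel, center, bulk_velocity):
        # pos, vel: (3, N) arrays; center, bulk_velocity: length-3.
        rel = pos - np.reshape(center, (3, 1))
        v = vel - np.reshape(bulk_velocity, (3, 1))
        rhat = rel / np.sqrt((rel**2).sum(axis=0))  # unit radial vectors
        return (v * rhat).sum(axis=0)               # v . r_hat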
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -293,6 +293,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "baryon_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -306,6 +318,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "total_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
@@ -319,3 +343,15 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def multimass_mass(field, data):
+ rho = data["deposit", "multimass_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
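Each of the three new deposit fields is just its density counterpart times the cell volume (note the units strings are carried over unchanged from the density fields). The shared pattern, sketched as a field-function factory (names from the diff; data access is schematic):

    def make_mass_field(density_field):
        # Returns a field function computing mass = density * cell volume.
        def _mass(field, data):
            rho = data["deposit", density_field]
            return rho * data["CellVolume"]
        return _mass

    baryon_mass = make_mass_field("baryon_density")
    total_mass = make_mass_field("total_density")
    multimass_mass = make_mass_field("multimass_density")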
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -992,6 +992,7 @@
self.sfc_start = sfc_start
self.sfc_end = sfc_end
+ @cython.cdivision(True)
cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
# Calculate the index
cdef int coords[3], i
@@ -1001,6 +1002,7 @@
sfc = artio_sfc_index(self.handle, coords)
return sfc
+ @cython.cdivision(True)
cdef void sfc_to_pos(self, np.int64_t sfc, np.float64_t pos[3]) nogil:
cdef int coords[3], i
artio_sfc_coords(self.handle, sfc, coords)
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/frontends/flash/data_structures.py
--- a/yt/frontends/flash/data_structures.py
+++ b/yt/frontends/flash/data_structures.py
@@ -444,6 +444,12 @@
mylog.warning('Identical domain left edge and right edges '
'along dummy dimension (%i), attempting to read anyway' % d)
self.domain_right_edge[d] = self.domain_left_edge[d]+1.0
+ if self.dimensionality < 3 and self.geometry == "cylindrical":
+ mylog.warning("Extending theta dimension to 2PI + left edge.")
+ self.domain_right_edge[2] = self.domain_left_edge[2] + 2*np.pi
+ elif self.dimensionality < 3 and self.geometry == "polar":
+ mylog.warning("Extending theta dimension to 2PI + left edge.")
+ self.domain_right_edge[1] = self.domain_left_edge[1] + 2*np.pi
self.domain_dimensions = \
np.array([nblockx*nxb,nblocky*nyb,nblockz*nzb])
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/frontends/flash/io.py
--- a/yt/frontends/flash/io.py
+++ b/yt/frontends/flash/io.py
@@ -82,7 +82,9 @@
rv = {}
for field in fields:
ftype, fname = field
- rv[field] = np.empty(size, dtype=f["/%s" % fname].dtype)
+ dt = f["/%s" % fname].dtype
+ if dt == "float32": dt = "float64"
+ rv[field] = np.empty(size, dtype=dt)
ng = sum(len(c.objs) for c in chunks)
mylog.debug("Reading %s cells of %s fields in %s blocks",
size, [f2 for f1, f2 in fields], ng)
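The FLASH IO hunk widens float32 datasets to float64 at read time so later arithmetic stays in double precision; other dtypes pass through untouched. In isolation:

    import numpy as np

    def read_buffer_dtype(dataset_dtype):
        # Promote single precision on read; everything else passes through.
        dt = np.dtype(dataset_dtype)
        if dt == np.float32:
            dt = np.float64
        return dt

    print(read_buffer_dtype("float32"))  # float64
    print(read_buffer_dtype("int32"))    # int32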
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -112,9 +112,9 @@
if self.cosmological_simulation:
for unit in mpc_conversion:
mpch['%sh' % unit] = mpch[unit] * self.hubble_constant
- mpch['%shcm' % unit] = (mpch["%sh" % unit] /
+ mpch['%shcm' % unit] = (mpch["%sh" % unit] *
(1 + self.current_redshift))
- mpch['%scm' % unit] = mpch[unit] / (1 + self.current_redshift)
+ mpch['%scm' % unit] = mpch[unit] * (1 + self.current_redshift)
# ud == unit destination
# ur == unit registry
for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
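The sign of the redshift factor is the whole fix here: a fixed proper length corresponds to (1+z) times as many comoving units, so the 'cm' and 'hcm' conversion entries must multiply by (1 + current_redshift), not divide. A worked check with example values:

    hubble_constant = 0.7   # h
    z = 1.0                 # current redshift

    mpc = 1.0                        # proper Mpc
    mpch = mpc * hubble_constant     # Mpc/h
    mpchcm = mpch * (1.0 + z)        # comoving Mpc/h
    mpccm = mpc * (1.0 + z)          # comoving Mpc
    # At z = 1, one proper Mpc is 2 comoving Mpc, as expected.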
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/geometry/coordinate_handler.py
--- a/yt/geometry/coordinate_handler.py
+++ b/yt/geometry/coordinate_handler.py
@@ -189,8 +189,8 @@
class PolarCoordinateHandler(CoordinateHandler):
- def __init__(self, pf, ordering = 'rzt'):
- if ordering != 'rzt': raise NotImplementedError
+ def __init__(self, pf, ordering = 'rtz'):
+ if ordering != 'rtz': raise NotImplementedError
super(PolarCoordinateHandler, self).__init__(pf)
def coordinate_fields(self):
@@ -198,17 +198,18 @@
return PolarFieldInfo
def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
- raise NotImplementedError
- if dimension == 1:
+ ax_name = self.axis_name[dimension]
+ if ax_name in ('r', 'theta'):
return self._ortho_pixelize(data_source, field, bounds, size,
antialias)
- elif dimension == 2:
- return self._polar_pixelize(data_source, field, bounds, size,
+ elif ax_name == "z":
+ return self._cyl_pixelize(data_source, field, bounds, size,
antialias)
else:
# Pixelizing along a cylindrical surface is a bit tricky
raise NotImplementedError
+
def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
buff = _MPL.Pixelize(data_source['px'], data_source['py'],
data_source['pdx'], data_source['pdy'],
@@ -222,7 +223,80 @@
data_source['dr']/2.0,
data_source['theta'],
data_source['dtheta']/2.0,
- size[0], field, bounds[0])
+ size[0], data_source[field], bounds[0])
+ return buff
+
+ axis_name = { 0 : 'r', 1 : 'theta', 2 : 'z',
+ 'r' : 'r', 'theta' : 'theta', 'z' : 'z',
+ 'R' : 'r', 'Theta' : 'theta', 'Z' : 'z'}
+
+ axis_id = { 'r' : 0, 'theta' : 1, 'z' : 2,
+ 0 : 0, 1 : 1, 2 : 2}
+
+ x_axis = { 'r' : 1, 'theta' : 0, 'z' : 0,
+ 0 : 1, 1 : 0, 2 : 0}
+
+ y_axis = { 'r' : 2, 'theta' : 2, 'z' : 1,
+ 0 : 2, 1 : 2, 2 : 1}
+
+ def convert_from_cartesian(self, coord):
+ return cartesian_to_cylindrical(coord)
+
+ def convert_to_cartesian(self, coord):
+ return cylindrical_to_cartesian(coord)
+
+ def convert_to_cylindrical(self, coord):
+ return coord
+
+ def convert_from_cylindrical(self, coord):
+ return coord
+
+ def convert_to_spherical(self, coord):
+ raise NotImplementedError
+
+ def convert_from_spherical(self, coord):
+ raise NotImplementedError
+
+ @property
+ def period(self):
+ return np.array([0.0, 0.0, 2.0*np.pi])
+
+class CylindricalCoordinateHandler(CoordinateHandler):
+
+ def __init__(self, pf, ordering = 'rzt'):
+ if ordering != 'rzt': raise NotImplementedError
+ super(CylindricalCoordinateHandler, self).__init__(pf)
+
+ def coordinate_fields(self):
+ # return the fields for r, z, theta
+ return CylindricalFieldInfo
+
+ def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
+ ax_name = self.axis_name[dimension]
+ if ax_name in ('r', 'theta'):
+ return self._ortho_pixelize(data_source, field, bounds, size,
+ antialias)
+ elif ax_name == "z":
+ return self._cyl_pixelize(data_source, field, bounds, size,
+ antialias)
+ else:
+ # Pixelizing along a cylindrical surface is a bit tricky
+ raise NotImplementedError
+
+ def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
+ buff = _MPL.Pixelize(data_source['px'], data_source['py'],
+ data_source['pdx'], data_source['pdy'],
+ data_source[field], size[0], size[1],
+ bounds, int(antialias),
+ True, self.period).transpose()
+ return buff
+
+ def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
+ buff = pixelize_cylinder(data_source['r'],
+ data_source['dr']/2.0,
+ data_source['theta'],
+ data_source['dtheta']/2.0,
+ size[0], data_source[field], bounds[0])
return buff
axis_name = { 0 : 'r', 1 : 'z', 2 : 'theta',
@@ -260,76 +334,3 @@
def period(self):
return np.array([0.0, 0.0, 2.0*np.pi])
-class CylindricalCoordinateHandler(CoordinateHandler):
-
- def __init__(self, pf, ordering = 'rtz'):
- if ordering != 'rtz': raise NotImplementedError
- super(CylindricalCoordinateHandler, self).__init__(pf)
-
- def coordinate_fields(self):
- # return the fields for r, z, theta
- return CylindricalFieldInfo
-
- def pixelize(self, dimension, data_source, field, bounds, size, antialias = True):
- raise NotImplementedError
- if dimension == 1:
- return self._ortho_pixelize(data_source, field, bounds, size,
- antialias)
- elif dimension == 2:
- return self._cyl_pixelize(data_source, field, bounds, size,
- antialias)
- else:
- # Pixelizing along a cylindrical surface is a bit tricky
- raise NotImplementedError
-
- def _ortho_pixelize(self, data_source, field, bounds, size, antialias):
- buff = _MPL.Pixelize(data_source['px'], data_source['py'],
- data_source['pdx'], data_source['pdy'],
- data_source[field], size[0], size[1],
- bounds, int(antialias),
- True, self.period).transpose()
- return buff
-
- def _cyl_pixelize(self, data_source, field, bounds, size, antialias):
- buff = pixelize_cylinder(data_source['r'],
- data_source['dr']/2.0,
- data_source['theta'],
- data_source['dtheta']/2.0,
- size[0], field, bounds[0])
- return buff
-
- axis_name = { 0 : 'r', 1 : 'z', 2 : 'theta',
- 'r' : 'r', 'z' : 'z', 'theta' : 'theta',
- 'R' : 'r', 'Z' : 'z', 'Theta' : 'theta'}
-
- axis_id = { 'r' : 0, 'z' : 1, 'theta' : 2,
- 0 : 0, 1 : 1, 2 : 2}
-
- x_axis = { 'r' : 1, 'z' : 0, 'theta' : 0,
- 0 : 1, 1 : 0, 2 : 0}
-
- y_axis = { 'r' : 2, 'z' : 2, 'theta' : 1,
- 0 : 2, 1 : 2, 2 : 1}
-
- def convert_from_cartesian(self, coord):
- return cartesian_to_cylindrical(coord)
-
- def convert_to_cartesian(self, coord):
- return cylindrical_to_cartesian(coord)
-
- def convert_to_cylindrical(self, coord):
- return coord
-
- def convert_from_cylindrical(self, coord):
- return coord
-
- def convert_to_spherical(self, coord):
- raise NotImplementedError
-
- def convert_from_spherical(self, coord):
- raise NotImplementedError
-
- @property
- def period(self):
- return np.array([0.0, 0.0, 2.0*np.pi])
-
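Net effect of this diff: the polar and cylindrical handlers swap axis orderings ('rtz' vs 'rzt'), pixelize_cylinder is fed data_source[field] rather than the bare field name, and pixelize now dispatches on the axis name instead of a raw dimension index. The dispatch logic both handlers now share, written out as a standalone function over a handler with the interface shown in the diff:

    def pixelize_dispatch(handler, dimension, data_source, field,
                          bounds, size, antialias=True):
        # Slices perpendicular to r or theta are rectangular in the
        # remaining coordinates; a z-slice is an (r, theta) disk.
        ax_name = handler.axis_name[dimension]
        if ax_name in ("r", "theta"):
            return handler._ortho_pixelize(data_source, field, bounds,
                                           size, antialias)
        if ax_name == "z":
            return handler._cyl_pixelize(data_source, field, bounds,
                                         size, antialias)
        # Pixelizing along a cylindrical surface is a bit tricky.
        raise NotImplementedError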
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/geometry/geometry_handler.py
--- a/yt/geometry/geometry_handler.py
+++ b/yt/geometry/geometry_handler.py
@@ -220,7 +220,6 @@
for pt in self.parameter_file.particle_types:
new_fi = copy.copy(finfo)
new_fi.name = (pt, new_fi.name)
- if new_fi.name in fi: continue
fi[new_fi.name] = new_fi
new_fields.append(new_fi.name)
fields_to_check += new_fields
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/geometry/particle_deposit.pyx
--- a/yt/geometry/particle_deposit.pyx
+++ b/yt/geometry/particle_deposit.pyx
@@ -218,11 +218,14 @@
ib0[i] = iclip(ib0[i], 0, dim[i] - 1)
ib1[i] = iclip(ib1[i], 0, dim[i] - 1)
for i from ib0[0] <= i <= ib1[0]:
- idist[0] = (ii[0] - i) * (ii[0] - i) * dds[0]
+ idist[0] = (ii[0] - i) * dds[0]
+ idist[0] *= idist[0]
for j from ib0[1] <= j <= ib1[1]:
- idist[1] = (ii[1] - j) * (ii[1] - j) * dds[1]
+ idist[1] = (ii[1] - j) * dds[1]
+ idist[1] *= idist[1]
for k from ib0[2] <= k <= ib1[2]:
- idist[2] = (ii[2] - k) * (ii[2] - k) * dds[2]
+ idist[2] = (ii[2] - k) * dds[2]
+ idist[2] *= idist[2]
dist = idist[0] + idist[1] + idist[2]
# Calculate distance in multiples of the smoothing length
dist = sqrt(dist) / fields[0]
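The rewrite changes the meaning of each distance term: (ii - i) * (ii - i) * dds is an index offset squared times a single cell width, whereas ((ii - i) * dds)**2 is a genuine squared physical offset, so the later sqrt(dist) / fields[0] really is a length in units of the smoothing length. In plain numpy:

    import numpy as np

    def squared_physical_distance(ii, ijk, dds):
        # ii: particle's cell index per axis; ijk: neighbour cell index;
        # dds: cell width per axis. Each term is a squared length.
        d = (np.asarray(ii) - np.asarray(ijk)) * np.asarray(dds)
        return float((d * d).sum())

    # distance in units of the smoothing length h:
    # np.sqrt(squared_physical_distance(ii, ijk, dds)) / h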
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -64,7 +64,8 @@
ValidateParameter, ValidateDataField, ValidateProperty, \
ValidateSpatial, ValidateGridType, \
TimeSeriesData, AnalysisTask, analysis_task, \
- ParticleTrajectoryCollection, ImageArray
+ ParticleTrajectoryCollection, ImageArray, \
+ particle_filter
from yt.data_objects.derived_quantities import \
add_quantity, quantity_info
@@ -118,7 +119,8 @@
from yt.frontends.stream.api import \
StreamStaticOutput, StreamFieldInfo, add_stream_field, \
- StreamHandler, load_uniform_grid, load_amr_grids
+ StreamHandler, load_uniform_grid, load_amr_grids, \
+ load_particles
from yt.frontends.sph.api import \
OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1235,7 +1235,8 @@
lo.main( None, download=args.number )
class YTNotebookUploadCmd(YTCommand):
- args = (dict(short="file", type=str),)
+ args = (dict(short="file", type=str),
+ dict(long="--title", short="-t", type=str, default = None))
description = \
"""
Upload an IPython notebook to hub.yt-project.org.
@@ -1254,6 +1255,11 @@
t = json.loads(open(filename).read())['metadata']['name']
except (ValueError, KeyError):
print "File does not appear to be an IPython notebook."
+ if args.title is not None:
+ t = args.title
+ if len(t) == 0:
+ print "You need to specify a title with --title ."
+ return 1
from yt.utilities.minimal_representation import MinimalNotebook
mn = MinimalNotebook(filename, t)
rv = mn.upload()
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/utilities/exceptions.py
--- a/yt/utilities/exceptions.py
+++ b/yt/utilities/exceptions.py
@@ -287,3 +287,14 @@
def __str__(self):
return "Filter '%s' ill-defined. Applied to shape %s but is shape %s." % (
self.filter, self.s1, self.s2)
+
+class YTIllDefinedBounds(YTException):
+ def __init__(self, lb, ub):
+ self.lb = lb
+ self.ub = ub
+
+ def __str__(self):
+ v = "The bounds %0.3e and %0.3e are ill-defined. " % (self.lb, self.ub)
+ v += "Typically this happens when a log binning is specified "
+ v += "and zero or negative values are given for the bounds."
+ return v
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/utilities/lib/misc_utilities.pyx
--- a/yt/utilities/lib/misc_utilities.pyx
+++ b/yt/utilities/lib/misc_utilities.pyx
@@ -378,6 +378,8 @@
raise KeyError
@cython.cdivision(True)
+@cython.boundscheck(False)
+@cython.wraparound(False)
def pixelize_cylinder(np.ndarray[np.float64_t, ndim=1] radius,
np.ndarray[np.float64_t, ndim=1] dradius,
np.ndarray[np.float64_t, ndim=1] theta,
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/visualization/volume_rendering/camera.py
--- a/yt/visualization/volume_rendering/camera.py
+++ b/yt/visualization/volume_rendering/camera.py
@@ -2034,6 +2034,9 @@
return temp_weightfield
pf.field_info.add_field("temp_weightfield",
function=_make_wf(field, weight))
+ # Now we have to tell the parameter file to add it and to calculate its
+ # dependencies..
+ pf.h._derived_fields_add(["temp_weightfield"], [])
fields = ["temp_weightfield", weight]
nv = 12*nside**2
image = np.zeros((nv,1,4), dtype='float64', order='C')
@@ -2083,6 +2086,7 @@
else:
image[:,:,0] /= image[:,:,1]
pf.field_info.pop("temp_weightfield")
+ pf.field_dependencies.pop("temp_weightfield")
for g in pf.h.grids:
if "temp_weightfield" in g.keys():
del g["temp_weightfield"]
@@ -2133,6 +2137,9 @@
return temp_weightfield
pf.field_info.add_field("temp_weightfield",
function=_make_wf(self.field, self.weight))
+ # Now we have to tell the parameter file to add it and to calculate
+ # its dependencies..
+ pf.h._derived_fields_add(["temp_weightfield"], [])
fields = ["temp_weightfield", self.weight]
self.fields = fields
@@ -2327,6 +2334,7 @@
image = projcam.snapshot()
if weight is not None:
pf.field_info.pop("temp_weightfield")
+ pf.field_dependencies.pop("temp_weightfield")
del projcam
return image[:,:]
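Both camera paths now pair field_info.add_field with an explicit h._derived_fields_add so the throwaway "temp_weightfield" acquires dependency information, and both teardown paths pop it from field_dependencies as well as field_info. The register/cleanup discipline, sketched generically (the yt calls are as in the diff; the try/finally framing is this sketch's, not the committed code's):

    def with_temporary_field(pf, name, func, render):
        # Register a throwaway derived field, render with it, and make
        # sure every registry that learned about it forgets it again.
        pf.field_info.add_field(name, function=func)
        pf.h._derived_fields_add([name], [])
        try:
            return render(pf)
        finally:
            pf.field_info.pop(name)
            pf.field_dependencies.pop(name, None)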
diff -r e3e35f832c2c0deafd7d2db689f4a311043778fd -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 yt/visualization/volume_rendering/transfer_functions.py
--- a/yt/visualization/volume_rendering/transfer_functions.py
+++ b/yt/visualization/volume_rendering/transfer_functions.py
@@ -236,8 +236,15 @@
this is fine. However, more complicated schema are possible by using
this object. For instance, density-weighted emission that produces
colors based on the temperature of the fluid.
+
+ Parameters
+ ----------
+ grey_opacity : bool
+ Should opacity be calculated on a channel-by-channel basis, or
+ overall? Useful for opaque renderings. Default: False
+
"""
- def __init__(self):
+ def __init__(self, grey_opacity=False):
self.n_field_tables = 0
self.tables = [] # Tables are interpolation tables
self.field_ids = [0] * 6 # This correlates fields with tables
@@ -246,6 +253,7 @@
self.weight_table_ids = [-1] * 6
self.grad_field = -1
self.light_source_v = self.light_source_c = np.zeros(3, 'float64')
+ self.grey_opacity = grey_opacity
def add_field_table(self, table, field_id, weight_field_id = -1,
weight_table_id = -1):
@@ -838,6 +846,7 @@
self.link_channels(i+3, i+3)
self._normalize()
+ self.grey_opacity = False
def _normalize(self):
fmax = np.array([f.y for f in self.tables[:3]])
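grey_opacity chooses between per-channel alpha and one shared (grey) alpha during compositing; it is now a constructor flag, defaulted off, and explicitly disabled by the colormap setup at the end of the diff. A toy illustration of the distinction (not yt's compositor; the max() reduction is one common convention, chosen here only for illustration):

    import numpy as np

    rgb = np.array([0.8, 0.2, 0.1])   # per-channel opacity sample

    # channel-by-channel: each channel attenuates independently,
    # so colour can leak through dense regions
    alpha_per_channel = rgb

    # grey opacity: one shared alpha, useful for opaque,
    # isosurface-like renderings
    alpha_grey = np.full(3, rgb.max())

    print(alpha_per_channel, alpha_grey)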
https://bitbucket.org/yt_analysis/yt-3.0/commits/cf6a41ce2257/
Changeset: cf6a41ce2257
Branch: yt-3.0
User: juxtaposicion
Date: 2013-08-22 08:41:28
Summary: fixing up R* a bit
Affected #: 1 file
diff -r 7eab13f45a1ba1ca353b0e44c05a2695b04e1686 -r cf6a41ce2257ba01f7e1e1620a225c05db520775 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -204,6 +204,7 @@
else:
sources = [pf.h.all_data()]
for g in sources:
+ if getattr(g, 'NumberOfParticles', 1) == 0: continue
if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
https://bitbucket.org/yt_analysis/yt-3.0/commits/1a643ea27844/
Changeset: 1a643ea27844
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 20:31:28
Summary: Merging Chris's work on Rockstar and fields.
Affected #: 7 files
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -206,8 +206,6 @@
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
-
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -217,8 +215,13 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
+ if "grids" in dir(pf.h):
+ sources = pf.h._get_objs("grids")
+ else:
+ sources = [pf.h.all_data()]
+ for g in sources:
+ if getattr(g, 'NumberOfParticles', 1) == 0: continue
+ if len(g['particle_position_x']) == 0: continue
if (rh.dm_only or (not has_particle_type)):
if rh.hires_only:
iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -230,6 +230,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
@@ -397,6 +398,8 @@
ftype = self._current_particle_type
else:
ftype = self._current_fluid_type
+ if (ftype, fname) not in self.pf.field_info:
+ ftype = "gas"
if finfo.particle_type and ftype not in self.pf.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.pf.fluid_types:
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -221,33 +221,54 @@
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
-def _StarAngularMomentumVector(data):
+def _StarAngularMomentumVector(data, ftype=None):
"""
This function returns the mass-weighted average angular momentum vector
for stars.
"""
- is_star = data["creation_time"] > 0
- star_mass = data["ParticleMassMsun"][is_star]
- sLx = data["ParticleSpecificAngularMomentumX"][is_star]
- sLy = data["ParticleSpecificAngularMomentumY"][is_star]
- sLz = data["ParticleSpecificAngularMomentumZ"][is_star]
- amx = sLx * star_mass
- amy = sLy * star_mass
- amz = sLz * star_mass
+ if ftype is None:
+ is_star = data["creation_time"] > 0
+ star_mass = data["ParticleMassMsun"][is_star]
+ else:
+ is_star = Ellipsis
+ star_mass = data[ftype, "ParticleMassMsun"]
+ key = "ParticleSpecificAngularMomentum%s"
+ amx, amy, amz = [(data[key % ax] if ftype is None
+ else data[ftype, key % ax])[is_star] * star_mass
+ for ax in "XYZ"]
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0,dtype=np.float64)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum(dtype=np.float64))
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
@@ -716,3 +737,28 @@
add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
combine_function=_combParticleDensityCenter,n_ret=2)
+
+def _HalfMass(data, field):
+ """
+ Cumulative sum the given mass field and find
+ at what radius the half mass is. Simple but
+ memory-expensive method.
+ """
+ d = np.nan_to_num(data[field])
+ r = data['Radius']
+ return d, r
+
+def _combHalfMass(data, field_vals, radii, frac=0.5):
+ fv = np.concatenate(field_vals.tolist()).ravel()
+ r = np.concatenate(radii.tolist()).ravel()
+ idx = np.argsort(r)
+ r = r[idx]
+ fv = np.cumsum(fv[idx])
+ idx, = np.where(fv / fv[-1] > frac)
+ if len(idx) > 0:
+ return r[idx[0]]
+ else:
+ return np.nan
+
+add_quantity("HalfMass",function=_HalfMass,
+ combine_function=_combHalfMass,n_ret=2)
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -263,6 +263,8 @@
if hasattr(self.pf, "field_info"):
if not isinstance(item, tuple):
field = ("unknown", item)
+ finfo = self.pf._get_field_info(*field)
+ mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
finfo = self.pf._get_field_info(*field)
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -675,6 +675,13 @@
yv = data["particle_velocity_y"] - bv[1]
zv = data["particle_velocity_z"] - bv[2]
return yv*z - zv*y
+add_field("ParticleSpecificAngularMomentumX",
+ function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumY(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -685,6 +692,13 @@
xv = data["particle_velocity_x"] - bv[0]
zv = data["particle_velocity_z"] - bv[2]
return -(xv*z - zv*x)
+add_field("ParticleSpecificAngularMomentumY",
+ function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumZ(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -695,14 +709,13 @@
xv = data["particle_velocity_x"] - bv[0]
yv = data["particle_velocity_y"] - bv[1]
return xv*y - yv*x
-for ax in 'XYZ':
- n = "ParticleSpecificAngularMomentum%s" % ax
- add_field(n, function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentum,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
- add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentumKMSMPC,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ",
+ function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleAngularMomentum(field, data):
return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
@@ -717,17 +730,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -858,6 +871,58 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+ particle_type=True, units=r"\rm{cm}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return spht
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphp = get_sph_phi_component(pos, theta, phi, normal)
+ return sphp
+
+add_field("ParticlePhiSpherical", function=_ParticleThetaSpherical,
+ particle_type=True, units=r"\rm{cm}/\rm{s}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
def _ParticleRadialVelocity(field, data):
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
@@ -866,8 +931,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -886,8 +951,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
spht = get_sph_theta_component(vel, theta, phi, normal)
@@ -906,8 +971,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphp = get_sph_phi_component(vel, phi, normal)
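Editor's note: all of the new spherical particle fields read their geometry
from field parameters rather than arguments, which is why each add_field()
call carries ValidateParameter validators. A data object therefore needs
"center", "normal", and (for the velocity variants) "bulk_velocity" set
before access; a hedged sketch with a hypothetical dataset:

    import numpy as np
    from yt.mods import load
    pf = load("DD0010/moving7_0010")             # hypothetical dataset
    sp = pf.h.sphere("max", (100.0, "kpc"))      # sets "center" itself
    sp.set_field_parameter("normal", np.array([0., 0., 1.]))
    sp.set_field_parameter("bulk_velocity", np.zeros(3))
    vr = sp["ParticleRadialVelocity"]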
diff -r 7cf87ba3a876db650437a3b9c5d21cb4dc6095aa -r 1a643ea27844705fd0567b621e3a90efbfff68d4 yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -283,6 +283,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "baryon_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -296,6 +308,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "total_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
@@ -309,3 +333,15 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def multimass_mass(field, data):
+ rho = data["deposit", "multimass_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
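Editor's note: the three new ART fields repeat one pattern: a deposit mass
is its density counterpart times CellVolume. (The hunks reuse the density
units string; grams would be the dimensionally consistent label.) A
hypothetical loop that would register all three at once, in the context of
the same module:

    def _mass_from_density(density_name):
        def _mass(field, data):
            return data["deposit", density_name] * data["CellVolume"]
        return _mass

    for name in ("baryon", "total", "multimass"):
        ARTFieldInfo.add_field(("deposit", "%s_mass" % name),
            function=_mass_from_density("%s_density" % name),
            validators=[ValidateSpatial()],
            display_name="\\mathrm{%s Mass}" % name.capitalize(),
            units=r"\mathrm{g}")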
https://bitbucket.org/yt_analysis/yt-3.0/commits/b08c3f479670/
Changeset: b08c3f479670
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 21:12:00
Summary: Adding particle_ones field.
Affected #: 1 file
diff -r 1a643ea27844705fd0567b621e3a90efbfff68d4 -r b08c3f479670e0af9f6eee36a7025f60ab58c8c2 yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -114,6 +114,14 @@
# Now some translation functions.
+ def particle_ones(field, data):
+ return np.ones(data[ptype, mass_name].shape, dtype="float64")
+
+ registry.add_field((ptype, "particle_ones"),
+ function = particle_ones,
+ particle_type = True,
+ units = "")
+
registry.add_field((ptype, "ParticleMass"),
function = TranslationFunc((ptype, mass_name)),
particle_type = True,
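Editor's note: particle_ones turns particle counting into an ordinary field
reduction, which is exactly how the reworked Rockstar reader below tallies
particles per chunk. A one-line sketch against any loaded dataset pf:

    dd = pf.h.all_data()
    n_parts = int(dd.quantities["TotalQuantity"](("all", "particle_ones"))[0])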
https://bitbucket.org/yt_analysis/yt-3.0/commits/3b517c190636/
Changeset: 3b517c190636
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 21:12:18
Summary: Re-work Rockstar interface to be more friendly to yt 3.0.
Affected #: 2 files
diff -r b08c3f479670e0af9f6eee36a7025f60ab58c8c2 -r 3b517c190636d3187ff41c108864a8fb6e4b62df yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -132,9 +132,9 @@
outbase: str
This is where the out*list files that Rockstar makes should be
placed. Default is 'rockstar_halos'.
- dm_type: 1
- In order to exclude stars and other particle types, define
- the dm_type. Default is 1, as Enzo has the DM particle type=1.
+ particle_type: str
+ This is the "particle type" that can be found in the data. This can be
+ a filtered particle or an inherent type.
force_res: float
This parameter specifies the force resolution that Rockstar uses
in units of Mpc/h.
@@ -144,23 +144,17 @@
longest) in the time series:
``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
total_particles : int
- If supplied, this is a pre-calculated total number of dark matter
- particles present in the simulation. For example, this is useful
- when analyzing a series of snapshots where the number of dark
- matter particles should not change and this will save some disk
- access time. If left unspecified, it will
- be calculated automatically. Default: ``None``.
- dm_only : boolean
- If set to ``True``, it will be assumed that there are only dark
- matter particles present in the simulation. This can save analysis
- time if this is indeed the case. Default: ``False``.
- hires_dm_mass : float
- If supplied, use only the highest resolution dark matter
- particles, with a mass less than (1.1*hires_dm_mass), in units
- of ParticleMassMsun. This is useful for multi-dm-mass
- simulations. Note that this will only give sensible results for
- halos that are not "polluted" by lower resolution
- particles. Default: ``None``.
+ If supplied, this is a pre-calculated total number of particles present
+ in the simulation. For example, this is useful when analyzing a series
+ of snapshots where the number of dark matter particles should not
+ change and this will save some disk access time. If left unspecified,
+ it will be calculated automatically. Default: ``None``.
+ particle_mass : float
+ If supplied, use this as the particle mass supplied to rockstar.
+ Otherwise, the smallest particle mass will be identified and calculated
+ internally. This is useful for multi-dm-mass simulations. Note that
+ this will only give sensible results for halos that are not "polluted"
+ by lower resolution particles. Default: ``None``.
Returns
-------
@@ -183,9 +177,9 @@
rh.run()
"""
def __init__(self, ts, num_readers = 1, num_writers = None,
- outbase="rockstar_halos", dm_type=1,
+ outbase="rockstar_halos", particle_type="all",
force_res=None, total_particles=None, dm_only=False,
- hires_dm_mass=None):
+ particle_mass=None):
mylog.warning("The citation for the Rockstar halo finder can be found at")
mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
ParallelAnalysisInterface.__init__(self)
@@ -204,7 +198,7 @@
if not isinstance(ts, TimeSeriesData):
ts = TimeSeriesData([ts])
self.ts = ts
- self.dm_type = dm_type
+ self.particle_type = particle_type
self.outbase = outbase
if force_res is None:
tpf = ts[-1] # Cache a reference
@@ -215,7 +209,7 @@
self.force_res = force_res
self.total_particles = total_particles
self.dm_only = dm_only
- self.hires_dm_mass = hires_dm_mass
+ self.particle_mass = particle_mass
# Setup pool and workgroups.
self.pool, self.workgroup = self.runner.setup_pool()
p = self._setup_parameters(ts)
@@ -226,63 +220,30 @@
def _setup_parameters(self, ts):
if self.workgroup.name != "readers": return None
tpf = ts[0]
+ ptype = self.particle_type
- def _particle_count(field, data):
- if data.NumberOfParticles == 0: return 0
- try:
- data["particle_type"]
- has_particle_type=True
- except KeyError:
- has_particle_type=False
-
- if (self.dm_only or (not has_particle_type)):
- if self.hires_dm_mass is None:
- return np.prod(data["particle_position_x"].shape)
- else:
- return (data['ParticleMassMsun'] < self.hires_dm_mass*1.1).sum()
- elif has_particle_type:
- if self.hires_dm_mass is None:
- return (data["particle_type"]==self.dm_type).sum()
- else:
- return ( (data["particle_type"]==self.dm_type) &
- (data['ParticleMassMsun'] < self.hires_dm_mass*1.1) ).sum()
- else:
- raise RuntimeError() # should never get here
-
- add_field("particle_count", function=_particle_count,
- not_in_all=True, particle_type=True)
dd = tpf.h.all_data()
# Get DM particle mass.
all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
has_particle_type = ("particle_type" in all_fields)
- if self.hires_dm_mass is None:
- for g in tpf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
-
- if (self.dm_only or (not has_particle_type)):
- iddm = Ellipsis
- elif has_particle_type:
- iddm = g["particle_type"] == self.dm_type
- else:
- iddm = Ellipsis # should never get here
-
- particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
- break
- else:
- particle_mass = self.hires_dm_mass / tpf.hubble_constant
+ particle_mass = self.particle_mass
+ if particle_mass is None:
+ particle_mass = dd.quantities["Extrema"](
+ (ptype, "ParticleMassMsun"), non_zero = True)[0][0]
+ particle_mass /= tpf.hubble_constant
p = {}
if self.total_particles is None:
# Get total_particles in parallel.
- p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+ tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
+ p['total_particles'] = int(tp)
p['left_edge'] = tpf.domain_left_edge
p['right_edge'] = tpf.domain_right_edge
p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
- p['particle_mass'] = particle_mass
+ p['particle_mass'] = self.particle_mass = particle_mass
return p
-
def __del__(self):
try:
self.pool.free_all()
@@ -306,7 +267,7 @@
(server_address, port))
self.port = str(self.port)
- def run(self, block_ratio = 1,**kwargs):
+ def run(self, block_ratio = 1, callbacks = None):
"""
"""
@@ -315,7 +276,8 @@
self._get_hosts()
self.handler.setup_rockstar(self.server_address, self.port,
len(self.ts), self.total_particles,
- self.dm_type,
+ self.particle_type,
+ particle_mass = self.particle_mass,
parallel = self.comm.size > 1,
num_readers = self.num_readers,
num_writers = self.num_writers,
@@ -323,10 +285,7 @@
block_ratio = block_ratio,
outbase = self.outbase,
force_res = self.force_res,
- particle_mass = float(self.particle_mass),
- dm_only = int(self.dm_only),
- hires_only = (self.hires_dm_mass is not None),
- **kwargs)
+ callbacks = callbacks)
# Make the directory to store the halo lists in.
if not self.outbase:
self.outbase = os.getcwd()
@@ -357,4 +316,4 @@
Reads in the out_0.list file and generates RockstarHaloList
and RockstarHalo objects.
"""
- return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+ return RockstarHaloList(self.ts[0], self.outbase+'/%s'%file_name)
diff -r b08c3f479670e0af9f6eee36a7025f60ab58c8c2 -r 3b517c190636d3187ff41c108864a8fb6e4b62df yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -179,30 +179,15 @@
SCALE_NOW = 1.0/(pf.current_redshift+1.0)
# Now we want to grab data from only a subset of the grids for each reader.
all_fields = set(pf.h.derived_field_list + pf.h.field_list)
- has_particle_type = ("particle_type" in all_fields)
# First we need to find out how many this reader is going to read in
# if the number of readers > 1.
+ dd = pf.h.all_data()
+
if NUM_BLOCKS > 1:
local_parts = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
- if (rh.dm_only or (not has_particle_type)):
- if rh.hires_only:
- iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
- else:
- iddm = Ellipsis
- elif has_particle_type:
- if rh.hires_only:
- iddm = ( (g["particle_type"]==rh.dm_type) &
- (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )
- else:
- iddm = g["particle_type"] == rh.dm_type
- else:
- iddm = Ellipsis # should never get here
- arri = g["particle_index"].astype("int64")
- arri = arri[iddm] #pick only DM
- local_parts += arri.size
+ for chunk in dd.chunks([(rh.particle_type, "particle_ones")], "io"):
+ local_parts += chunk[rh.particle_type, "particle_ones"].sum()
else:
local_parts = TOTAL_PARTICLES
@@ -215,28 +200,12 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- if "grids" in dir(pf.h):
- sources = pf.h._get_objs("grids")
- else:
- sources = [pf.h.all_data()]
- for g in sources:
- if getattr(g, 'NumberOfParticles', 1) == 0: continue
- if len(g['particle_position_x']) == 0: continue
- if (rh.dm_only or (not has_particle_type)):
- if rh.hires_only:
- iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
- else:
- iddm = Ellipsis
- elif has_particle_type:
- if rh.hires_only:
- iddm = ( (g["particle_type"]==rh.dm_type) &
- (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )
- else:
- iddm = g["particle_type"] == rh.dm_type
- else:
- iddm = Ellipsis # should never get here
- arri = g["particle_index"].astype("int64")
- arri = arri[iddm] #pick only DM
+ fields = [ (rh.particle_type, f) for f in
+ ["particle_position_%s" % ax for ax in 'xyz'] +
+ ["particle_velocity_%s" % ax for ax in 'xyz'] +
+ ["particle_index"]]
+ for chunk in dd.chunks(fields, "io"):
+ arri = chunk[rh.particle_type, "particle_index"]
npart = arri.size
for i in range(npart):
p[0][i+pi].id = arri[i]
@@ -245,8 +214,7 @@
"particle_position_z",
"particle_velocity_x", "particle_velocity_y",
"particle_velocity_z"]:
- arr = g[field].astype("float64")
- arr = arr[iddm] #pick DM
+ arr = chunk[rh.particle_type, field].astype("float64")
for i in range(npart):
p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
fi += 1
@@ -261,10 +229,8 @@
cdef int rank
cdef int size
cdef public int block_ratio
- cdef public int dm_type
+ cdef public object particle_type
cdef public int total_particles
- cdef public int dm_only
- cdef public int hires_only
cdef public object callbacks
def __cinit__(self, ts):
@@ -273,14 +239,13 @@
def setup_rockstar(self, char *server_address, char *server_port,
int num_snaps, np.int64_t total_particles,
- int dm_type,
+ particle_type,
np.float64_t particle_mass,
int parallel = False, int num_readers = 1,
int num_writers = 1,
int writing_port = -1, int block_ratio = 1,
int periodic = 1, force_res=None,
int min_halo_size = 25, outbase = "None",
- int dm_only = 0, int hires_only = False,
callbacks = None):
global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
@@ -311,8 +276,6 @@
NUM_BLOCKS = num_readers
MIN_HALO_OUTPUT_SIZE=min_halo_size
self.block_ratio = block_ratio
- self.dm_only = dm_only
- self.hires_only = hires_only
tpf = self.ts[0]
h0 = tpf.hubble_constant
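Editor's note: the public entry point changes shape here: dm_type, dm_only,
and hires_dm_mass are gone in favor of a named particle type (filtered or
inherent) plus an optional explicit particle mass. A hedged sketch of the
new call, with a hypothetical snapshot path:

    from yt.mods import load
    from yt.analysis_modules.halo_finding.rockstar.api import \
        RockstarHaloFinder
    ts = load("snapshot_000")                    # hypothetical dataset
    rh = RockstarHaloFinder(ts, num_readers=1, num_writers=1,
                            particle_type="all",  # any known particle type
                            particle_mass=None)   # None -> smallest nonzero
    rh.run()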
https://bitbucket.org/yt_analysis/yt-3.0/commits/f01e59c55c26/
Changeset: f01e59c55c26
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 21:30:42
Summary: Continuing to implement Rockstar for yt-3.0.
Affected #: 2 files
diff -r 3b517c190636d3187ff41c108864a8fb6e4b62df -r f01e59c55c261db0ce9c2f8295b862104a956103 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -18,6 +18,8 @@
cimport numpy as np
cimport cython
from libc.stdlib cimport malloc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_objects
from yt.config import ytcfg
@@ -171,8 +173,8 @@
cdef np.ndarray[np.int64_t, ndim=1] arri
cdef np.ndarray[np.float64_t, ndim=1] arr
cdef unsigned long long pi,fi,i
+ cdef np.int64_t local_parts = 0
pf = rh.tsl.next()
- print 'reading from particle filename %s: %s'%(filename,pf.basename)
block = int(str(filename).rsplit(".")[-1])
n = rh.block_ratio
@@ -186,7 +188,8 @@
if NUM_BLOCKS > 1:
local_parts = 0
- for chunk in dd.chunks([(rh.particle_type, "particle_ones")], "io"):
+ for chunk in parallel_objects(
+ dd.chunks([(rh.particle_type, "particle_ones")], "io")):
local_parts += chunk[rh.particle_type, "particle_ones"].sum()
else:
local_parts = TOTAL_PARTICLES
@@ -204,11 +207,12 @@
["particle_position_%s" % ax for ax in 'xyz'] +
["particle_velocity_%s" % ax for ax in 'xyz'] +
["particle_index"]]
- for chunk in dd.chunks(fields, "io"):
- arri = chunk[rh.particle_type, "particle_index"]
+ for chunk in parallel_objects(dd.chunks(fields, "io")):
+ arri = np.asarray(chunk[rh.particle_type, "particle_index"],
+ dtype="int64")
npart = arri.size
for i in range(npart):
- p[0][i+pi].id = arri[i]
+ p[0][i+pi].id = <np.int64_t> arri[i]
fi = 0
for field in ["particle_position_x", "particle_position_y",
"particle_position_z",
@@ -275,7 +279,9 @@
NUM_WRITERS = num_writers
NUM_BLOCKS = num_readers
MIN_HALO_OUTPUT_SIZE=min_halo_size
+ TOTAL_PARTICLES = total_particles
self.block_ratio = block_ratio
+ self.particle_type = particle_type
tpf = self.ts[0]
h0 = tpf.hubble_constant
diff -r 3b517c190636d3187ff41c108864a8fb6e4b62df -r f01e59c55c261db0ce9c2f8295b862104a956103 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -117,7 +117,7 @@
for fname in ["Coordinates", "Velocities", "ParticleIDs",
# Note: Mass, not Masses
- "Mass"]:
+ "Mass", "particle_index"]:
func = _field_concat(fname)
GadgetFieldInfo.add_field(("all", fname), function=func,
particle_type = True)
@@ -136,6 +136,11 @@
particle_scalar_functions(ptype, "Coordinates", "Velocities", GadgetFieldInfo)
KnownGadgetFields.add_field((ptype, "Coordinates"), function=NullFunc,
particle_type = True)
+
+ # Now we add some translations.
+ GadgetFieldInfo.add_field( (ptype, "particle_index"),
+ function = TranslationFunc((ptype, "ParticleIDs")),
+ particle_type = True)
particle_deposition_functions("all", "Coordinates", "Mass", GadgetFieldInfo)
# Now we have to manually apply the splits for "all", since we don't want to
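Editor's note: the reader now walks I/O-ordered chunks instead of grids, so
the same loop serves grid and octree frontends alike; wrapping the iterator
in parallel_objects splits the chunks across readers. The shape of the loop,
sketched in plain Python:

    fields = [("all", "particle_position_%s" % ax) for ax in "xyz"] \
           + [("all", "particle_velocity_%s" % ax) for ax in "xyz"] \
           + [("all", "particle_index")]
    for chunk in dd.chunks(fields, "io"):
        arri = chunk["all", "particle_index"]
        # ... copy arri.size particles into the C-side buffer ...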
https://bitbucket.org/yt_analysis/yt-3.0/commits/2ce570734217/
Changeset: 2ce570734217
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 21:52:50
Summary: Sometimes we get back zero-particle halos. Skip these.
Affected #: 1 file
diff -r f01e59c55c261db0ce9c2f8295b862104a956103 -r 2ce570734217cbe3d3f313ffa5cfd3c9ece23c65 yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -160,6 +160,8 @@
cdef class RockstarInterface
cdef void rh_analyze_halo(halo *h, particle *hp):
+ # I don't know why, but sometimes we get halos with 0 particles.
+ if h.num_p == 0: return
cdef particleflat[:] pslice
pslice = <particleflat[:h.num_p]> (<particleflat *>hp)
parray = np.asarray(pslice)
@@ -174,7 +176,7 @@
cdef np.ndarray[np.float64_t, ndim=1] arr
cdef unsigned long long pi,fi,i
cdef np.int64_t local_parts = 0
- pf = rh.tsl.next()
+ pf = rh.pf = rh.tsl.next()
block = int(str(filename).rsplit(".")[-1])
n = rh.block_ratio
@@ -230,6 +232,7 @@
cdef public object data_source
cdef public object ts
cdef public object tsl
+ cdef public object pf
cdef int rank
cdef int size
cdef public int block_ratio
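Editor's note: with this guard in place, user callbacks registered via
run(callbacks=...) are never handed an empty particle slice. Assuming, per
the callback interface established earlier in this series, that each
callback receives the parameter file and the halo's particle array, a
hypothetical callback would be:

    def my_halo_callback(pf, parray):
        print "analyzing a halo with %d particles" % parray.size

    rh.run(callbacks=[my_halo_callback])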
https://bitbucket.org/yt_analysis/yt-3.0/commits/8bd72c29599e/
Changeset: 8bd72c29599e
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-27 23:36:06
Summary: Merging HDF5 Gadget branch in.
Affected #: 3 files
diff -r 2ce570734217cbe3d3f313ffa5cfd3c9ece23c65 -r 8bd72c29599eb8c64b746154ca28280f0ec6c046 yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -40,6 +40,8 @@
KnownOWLSFields, \
GadgetFieldInfo, \
KnownGadgetFields, \
+ GadgetHDF5FieldInfo, \
+ KnownGadgetHDF5Fields, \
TipsyFieldInfo, \
KnownTipsyFields
@@ -100,6 +102,8 @@
mpch['%shcm' % unit] = (mpch["%sh" % unit] *
(1 + self.current_redshift))
mpch['%scm' % unit] = mpch[unit] * (1 + self.current_redshift)
+ elif 'cmcm' in unit_base:
+ unit_base['cm'] = self.units['cm'] = unit_base.pop("cmcm")
# ud == unit destination
# ur == unit registry
for ud, ur in [(self.units, mpch), (self.time_units, sec_conversion)]:
@@ -142,22 +146,28 @@
def __init__(self, filename, data_style="gadget_binary",
additional_fields=(),
unit_base=None, n_ref=64,
- over_refine_factor=1):
+ over_refine_factor=1,
+ bounding_box = None):
self.n_ref = n_ref
self.over_refine_factor = over_refine_factor
self.storage_filename = None
if unit_base is not None and "UnitLength_in_cm" in unit_base:
# We assume this is comoving, because in the absence of comoving
# integration the redshift will be zero.
- unit_base['cmcm'] = unit_base["UnitLength_in_cm"]
+ unit_base['cmcm'] = 1.0 / unit_base["UnitLength_in_cm"]
self._unit_base = unit_base
+ if bounding_box is not None:
+ bbox = np.array(bounding_box, dtype="float64")
+ self.domain_left_edge = bbox[:,0]
+ self.domain_right_edge = bbox[:,1]
+ else:
+ self.domain_left_edge = self.domain_right_edge = None
super(GadgetStaticOutput, self).__init__(filename, data_style)
def __repr__(self):
return os.path.basename(self.parameter_filename).split(".")[0]
- def _parse_parameter_file(self):
-
+ def _get_hvals(self):
# The entries in this header are capitalized and named to match Table 4
# in the GADGET-2 user guide.
@@ -166,6 +176,11 @@
for i in hvals:
if len(hvals[i]) == 1:
hvals[i] = hvals[i][0]
+ return hvals
+
+ def _parse_parameter_file(self):
+
+ hvals = self._get_hvals()
self.dimensionality = 3
self.refine_by = 2
@@ -174,8 +189,10 @@
int(os.stat(self.parameter_filename)[stat.ST_CTIME])
# Set standard values
- self.domain_left_edge = np.zeros(3, "float64")
- self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
+ # We may have an overridden bounding box.
+ if self.domain_left_edge is None:
+ self.domain_left_edge = np.zeros(3, "float64")
+ self.domain_right_edge = np.ones(3, "float64") * hvals["BoxSize"]
nz = 1 << self.over_refine_factor
self.domain_dimensions = np.ones(3, "int32") * nz
self.periodicity = (True, True, True)
@@ -221,9 +238,10 @@
self.file_count = hvals["NumFiles"]
- f.close()
-
def _set_units(self):
+ if self._unit_base is None and self.cosmological_simulation == 1:
+ mylog.info("Assuming length units are in Mpc/h (comoving)")
+ self._unit_base = dict(mpchcm = 1.0)
super(GadgetStaticOutput, self)._set_units()
length_unit = self.units['cm']
unit_base = self._unit_base or {}
@@ -322,6 +340,43 @@
pass
return False
+class GadgetHDF5StaticOutput(GadgetStaticOutput):
+ _file_class = ParticleFile
+ _fieldinfo_fallback = GadgetHDF5FieldInfo
+ _fieldinfo_known = KnownGadgetHDF5Fields
+
+ def __init__(self, filename, data_style="gadget_hdf5",
+ unit_base = None, n_ref=64,
+ over_refine_factor=1,
+ bounding_box = None):
+ self.storage_filename = None
+ filename = os.path.abspath(filename)
+ super(GadgetHDF5StaticOutput, self).__init__(
+ filename, data_style, unit_base=unit_base, n_ref=n_ref,
+ over_refine_factor=over_refine_factor,
+ bounding_box = bounding_box)
+
+ def _get_hvals(self):
+ handle = h5py.File(self.parameter_filename, mode="r")
+ hvals = {}
+ hvals.update((str(k), v) for k, v in handle["/Header"].attrs.items())
+ # Compat reasons.
+ hvals["NumFiles"] = hvals["NumFilesPerSnapshot"]
+ hvals["Massarr"] = hvals["MassTable"]
+ return hvals
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ try:
+ fileh = h5py.File(args[0], mode='r')
+ if "Constants" not in fileh["/"].keys() and \
+ "Header" in fileh["/"].keys():
+ fileh.close()
+ return True
+ fileh.close()
+ except:
+ pass
+ return False
class TipsyFile(ParticleFile):
diff -r 2ce570734217cbe3d3f313ffa5cfd3c9ece23c65 -r 8bd72c29599eb8c64b746154ca28280f0ec6c046 yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -47,6 +47,9 @@
KnownGadgetFields = FieldInfoContainer()
add_gadget_field = KnownGadgetFields.add_field
+GadgetHDF5FieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
+KnownGadgetHDF5Fields = FieldInfoContainer()
+
TipsyFieldInfo = FieldInfoContainer.create_with_fallback(FieldInfo)
add_Tipsy_field = TipsyFieldInfo.add_field
@@ -98,60 +101,72 @@
# Among other things we need to set up Coordinates
+def _setup_gadget_fields(ptypes, field_registry, known_registry):
+
+ # This has to be done manually for Gadget, because some of the particles will
+ # have uniform mass
+ def _gadget_particle_fields(ptype):
+ def _Mass(field, data):
+ pind = ptypes.index(ptype)
+ if data.pf["Massarr"][pind] == 0.0:
+ return data[ptype, "Masses"].copy()
+ mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
+ # Note that this is an alias, which is why we need to apply conversion
+ # here. Otherwise we'd have an asymmetry.
+ mass *= data.pf["Massarr"][pind] * data.convert("mass")
+ return mass
+ field_registry.add_field((ptype, "Mass"), function=_Mass,
+ particle_type = True)
+
+ for fname in ["Coordinates", "Velocities", "ParticleIDs",
+ # Note: Mass, not Masses
+ "Mass", "particle_index"]:
+ func = _field_concat(fname)
+ field_registry.add_field(("all", fname), function=func,
+ particle_type = True)
+
+ for ptype in ptypes:
+ known_registry.add_field((ptype, "Masses"), function=NullFunc,
+ particle_type = True,
+ convert_function=_get_conv("mass"),
+ units = r"\mathrm{g}")
+ _gadget_particle_fields(ptype)
+ known_registry.add_field((ptype, "Velocities"), function=NullFunc,
+ particle_type = True,
+ convert_function=_get_conv("velocity"),
+ units = r"\mathrm{cm}/\mathrm{s}")
+ particle_deposition_functions(ptype, "Coordinates", "Mass", field_registry)
+ particle_scalar_functions(ptype, "Coordinates", "Velocities", field_registry)
+ known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
+ particle_type = True)
+ # Now we add some translations.
+ field_registry.add_field( (ptype, "particle_index"),
+ function = TranslationFunc((ptype, "ParticleIDs")),
+ particle_type = True)
+ particle_deposition_functions("all", "Coordinates", "Mass", field_registry)
+
+ # Now we have to manually apply the splits for "all", since we don't want to
+ # use the splits defined above.
+
+ for iname, oname in [("Coordinates", "particle_position_"),
+ ("Velocities", "particle_velocity_")]:
+ for axi, ax in enumerate("xyz"):
+ func = _field_concat_slice(iname, axi)
+ field_registry.add_field(("all", oname + ax), function=func,
+ particle_type = True)
+
+# Note that we call the same function a few times here.
_gadget_ptypes = ("Gas", "Halo", "Disk", "Bulge", "Stars", "Bndry")
+_ghdf5_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
+ "PartType4", "PartType5")
-# This has to be done manually for Gadget, because some of the particles will
-# have uniform mass
-def _gadget_particle_fields(ptype):
- def _Mass(field, data):
- pind = _gadget_ptypes.index(ptype)
- if data.pf["Massarr"][pind] == 0.0:
- return data[ptype, "Masses"].copy()
- mass = np.ones(data[ptype, "ParticleIDs"].shape[0], dtype="float64")
- # Note that this is an alias, which is why we need to apply conversion
- # here. Otherwise we'd have an asymmetry.
- mass *= data.pf["Massarr"][pind] * data.convert("mass")
- return mass
- GadgetFieldInfo.add_field((ptype, "Mass"), function=_Mass,
- particle_type = True)
+_setup_gadget_fields(_gadget_ptypes,
+ GadgetFieldInfo,
+ KnownGadgetFields)
+_setup_gadget_fields(_ghdf5_ptypes,
+ GadgetHDF5FieldInfo,
+ KnownGadgetHDF5Fields)
-for fname in ["Coordinates", "Velocities", "ParticleIDs",
- # Note: Mass, not Masses
- "Mass", "particle_index"]:
- func = _field_concat(fname)
- GadgetFieldInfo.add_field(("all", fname), function=func,
- particle_type = True)
-
-for ptype in _gadget_ptypes:
- KnownGadgetFields.add_field((ptype, "Masses"), function=NullFunc,
- particle_type = True,
- convert_function=_get_conv("mass"),
- units = r"\mathrm{g}")
- _gadget_particle_fields(ptype)
- KnownGadgetFields.add_field((ptype, "Velocities"), function=NullFunc,
- particle_type = True,
- convert_function=_get_conv("velocity"),
- units = r"\mathrm{cm}/\mathrm{s}")
- particle_deposition_functions(ptype, "Coordinates", "Mass", GadgetFieldInfo)
- particle_scalar_functions(ptype, "Coordinates", "Velocities", GadgetFieldInfo)
- KnownGadgetFields.add_field((ptype, "Coordinates"), function=NullFunc,
- particle_type = True)
-
- # Now we add some translations.
- GadgetFieldInfo.add_field( (ptype, "particle_index"),
- function = TranslationFunc((ptype, "ParticleIDs")),
- particle_type = True)
-particle_deposition_functions("all", "Coordinates", "Mass", GadgetFieldInfo)
-
-# Now we have to manually apply the splits for "all", since we don't want to
-# use the splits defined above.
-
-for iname, oname in [("Coordinates", "particle_position_"),
- ("Velocities", "particle_velocity_")]:
- for axi, ax in enumerate("xyz"):
- func = _field_concat_slice(iname, axi)
- GadgetFieldInfo.add_field(("all", oname + ax), function=func,
- particle_type = True)
# OWLS
# ====
@@ -160,7 +175,7 @@
# make OWLS a subclass of Gadget fields.
_owls_ptypes = ("PartType0", "PartType1", "PartType2", "PartType3",
- "PartType4")
+ "PartType4", "PartType5")
for fname in ["Coordinates", "Velocities", "ParticleIDs",
# Note: Mass, not Masses
diff -r 2ce570734217cbe3d3f313ffa5cfd3c9ece23c65 -r 8bd72c29599eb8c64b746154ca28280f0ec6c046 yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -134,6 +134,8 @@
f.close()
return fields
+class IOHandlerGadgetHDF5(IOHandlerOWLS):
+ _data_style = "gadget_hdf5"
ZeroMass = object()
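Editor's note: the new GadgetHDF5StaticOutput accepts the same bounding_box
override as its binary parent, which _parse_parameter_file above now honors
in place of the header's BoxSize. A hedged loading sketch with a
hypothetical path and box:

    from yt.frontends.sph.data_structures import GadgetHDF5StaticOutput
    bbox = [[0.0, 25.0], [0.0, 25.0], [0.0, 25.0]]  # assumed comoving Mpc/h
    pf = GadgetHDF5StaticOutput("snap_033.hdf5", bounding_box=bbox)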
https://bitbucket.org/yt_analysis/yt-3.0/commits/580008a74e51/
Changeset: 580008a74e51
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 00:10:50
Summary: Adding optional suffix for Gadget HDF5.
Affected #: 1 file
diff -r a9ac49222ae65384288199c974678a6fbe3b070e -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b yt/frontends/sph/data_structures.py
--- a/yt/frontends/sph/data_structures.py
+++ b/yt/frontends/sph/data_structures.py
@@ -125,6 +125,7 @@
_fieldinfo_known = KnownGadgetFields
_particle_mass_name = "Mass"
_particle_coordinates_name = "Coordinates"
+ _suffix = ""
_header_spec = (('Npart', 6, 'i'),
('Massarr', 6, 'd'),
('Time', 1, 'd'),
@@ -229,10 +230,9 @@
self.parameters = hvals
prefix = self.parameter_filename.split(".", 1)[0]
- # suffix = self.parameter_filename.rsplit(".", 1)[-1]
if hvals["NumFiles"] > 1:
- self.filename_template = "%s.%%(num)s" % (prefix)
+ self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix)
else:
self.filename_template = self.parameter_filename
@@ -344,6 +344,7 @@
_file_class = ParticleFile
_fieldinfo_fallback = GadgetHDF5FieldInfo
_fieldinfo_known = KnownGadgetHDF5Fields
+ _suffix = ".hdf5"
def __init__(self, filename, data_style="gadget_hdf5",
unit_base = None, n_ref=64,
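Editor's note: the template fix means multi-file HDF5 snapshots keep their
suffix when per-file names are generated. The string mechanics, with a
hypothetical prefix:

    filename_template = "%s.%%(num)s%s" % ("snap_033", ".hdf5")
    print filename_template % {"num": 0}         # -> snap_033.0.hdf5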
https://bitbucket.org/yt_analysis/yt-3.0/commits/e62a4472597a/
Changeset: e62a4472597a
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 00:21:56
Summary: Merging from mainline development.
Affected #: 48 files
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba CITATION
--- /dev/null
+++ b/CITATION
@@ -0,0 +1,31 @@
+To cite yt in publications, please use:
+
+Turk, M. J., Smith, B. D., Oishi, J. S., et al. 2011, ApJS, 192, 9
+
+In the body of the text, please add a footnote to the yt webpage:
+
+http://yt-project.org/
+
+For LaTex and BibTex users:
+
+\bibitem[Turk et al.(2011)]{2011ApJS..192....9T} Turk, M.~J., Smith, B.~D.,
+Oishi, J.~S., et al.\ 2011, \apjs, 192, 9
+
+@ARTICLE{2011ApJS..192....9T,
+ author = {{Turk}, M.~J. and {Smith}, B.~D. and {Oishi}, J.~S. and {Skory}, S. and
+{Skillman}, S.~W. and {Abel}, T. and {Norman}, M.~L.},
+ title = "{yt: A Multi-code Analysis Toolkit for Astrophysical Simulation Data}",
+ journal = {\apjs},
+archivePrefix = "arXiv",
+ eprint = {1011.3514},
+ primaryClass = "astro-ph.IM",
+ keywords = {cosmology: theory, methods: data analysis, methods: numerical},
+ year = 2011,
+ month = jan,
+ volume = 192,
+ eid = {9},
+ pages = {9},
+ doi = {10.1088/0067-0049/192/1/9},
+ adsurl = {http://adsabs.harvard.edu/abs/2011ApJS..192....9T},
+ adsnote = {Provided by the SAO/NASA Astrophysics Data System}
+}
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba doc/get_yt.sh
--- /dev/null
+++ b/doc/get_yt.sh
@@ -0,0 +1,358 @@
+#
+# Hi there! Welcome to the yt installation script.
+#
+# This script is designed to create a fully isolated Python installation
+# with the dependencies you need to run yt.
+#
+# This script is based on Conda, a distribution mechanism from Continuum
+# Analytics. The process is as follows:
+#
+# 1. Download the appropriate Conda installation package
+# 2. Install Conda into the specified directory
+# 3. Install yt-specific dependencies
+# 4. Install yt
+#
+# There are a few options listed below, but by default, this will install
+# everything. At the end, it will tell you what to do to use yt.
+#
+# By default this will install yt from source.
+#
+# If you experience problems, please visit the Help section at
+# http://yt-project.org.
+#
+DEST_SUFFIX="yt-conda"
+DEST_DIR="`pwd`/${DEST_SUFFIX/ /}" # Installation location
+BRANCH="yt" # This is the branch to which we will forcibly update.
+INST_YT_SOURCE=1 # Do we do a source install of yt?
+
+##################################################################
+# #
+# You will likely not have to modify anything below this region. #
+# #
+##################################################################
+
+LOG_FILE="`pwd`/yt_install.log"
+
+# Here is the idiom for redirecting to the log file:
+# ( SOMECOMMAND 2>&1 ) 1>> ${LOG_FILE} || do_exit
+
+MINICONDA_URLBASE="http://repo.continuum.io/miniconda"
+MINICONDA_VERSION="1.9.1"
+YT_RECIPE_REPO="https://bitbucket.org/yt_analysis/yt_conda/raw/default"
+
+function do_exit
+{
+ echo "********************************************"
+ echo " FAILURE REPORT:"
+ echo "********************************************"
+ echo
+ tail -n 10 ${LOG_FILE}
+ echo
+ echo "********************************************"
+ echo "********************************************"
+ echo "Failure. Check ${LOG_FILE}. The last 10 lines are above."
+ exit 1
+}
+
+function log_cmd
+{
+ echo "EXECUTING:" >> ${LOG_FILE}
+ echo " $*" >> ${LOG_FILE}
+ ( $* 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytproject
+{
+ [ -e $1 ] && return
+ echo "Downloading $1 from yt-project.org"
+ ${GETFILE} "http://yt-project.org/dependencies/$1" || do_exit
+ ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytdata
+{
+ echo "Downloading $1 from yt-project.org"
+ [ -e $1 ] && return
+ ${GETFILE} "http://yt-project.org/data/$1" || do_exit
+ ( ${SHASUM} -c $1.sha512 2>&1 ) 1>> ${LOG_FILE} || do_exit
+}
+
+function get_ytrecipe {
+ RDIR=${DEST_DIR}/src/yt-recipes/$1
+ mkdir -p ${RDIR}
+ pushd ${RDIR}
+ log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/meta.yaml
+ log_cmd ${GETFILE} ${YT_RECIPE_REPO}/$1/build.sh
+ NEW_PKG=`conda build --output ${RDIR}`
+ log_cmd conda build --no-binstar-upload ${RDIR}
+ log_cmd conda install ${NEW_PKG}
+ popd
+}
+
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "Hi there! This is the yt installation script. We're going to download"
+echo "some stuff and install it to create a self-contained, isolated"
+echo "environment for yt to run within."
+echo
+echo "This will install Miniconda from Continuum Analytics, the necessary"
+echo "packages to run yt, and create a self-contained environment for you to"
+echo "use yt. Additionally, Conda itself provides the ability to install"
+echo "many other packages that can be used for other purposes."
+echo
+MYOS=`uname -s` # A guess at the OS
+if [ "${MYOS##Darwin}" != "${MYOS}" ]
+then
+ echo "Looks like you're running on Mac OSX."
+ echo
+ echo "NOTE: you must have the Xcode command line tools installed."
+ echo
+ echo "The instructions for obtaining these tools varies according"
+ echo "to your exact OS version. On older versions of OS X, you"
+ echo "must register for an account on the apple developer tools"
+ echo "website: https://developer.apple.com/downloads to obtain the"
+ echo "download link."
+ echo
+ echo "We have gathered some additional instructions for each"
+ echo "version of OS X below. If you have trouble installing yt"
+ echo "after following these instructions, don't hesitate to contact"
+ echo "the yt user's e-mail list."
+ echo
+ echo "You can see which version of OSX you are running by clicking"
+ echo "'About This Mac' in the apple menu on the left hand side of"
+ echo "menu bar. We're assuming that you've installed all operating"
+ echo "system updates; if you have an older version, we suggest"
+ echo "running software update and installing all available updates."
+ echo
+ echo "OS X 10.5.8: search for and download Xcode 3.1.4 from the"
+ echo "Apple developer tools website."
+ echo
+ echo "OS X 10.6.8: search for and download Xcode 3.2 from the Apple"
+ echo "developer tools website. You can either download the"
+ echo "Xcode 3.2.2 Developer Tools package (744 MB) and then use"
+ echo "Software Update to update to XCode 3.2.6 or"
+ echo "alternatively, you can download the Xcode 3.2.6/iOS SDK"
+ echo "bundle (4.1 GB)."
+ echo
+ echo "OS X 10.7.5: download Xcode 4.2 from the mac app store"
+ echo "(search for Xcode)."
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
+ echo
+ echo "OS X 10.8.2: download Xcode 4.6.1 from the mac app store."
+ echo "(search for Xcode)."
+ echo "Additionally, you will have to manually install the Xcode"
+ echo "command line tools, see:"
+ echo "http://stackoverflow.com/questions/9353444"
+ echo "Alternatively, download the Xcode command line tools from"
+ echo "the Apple developer tools website."
+ echo
+ echo "NOTE: It's possible that the installation will fail, if so,"
+ echo "please set the following environment variables, remove any"
+ echo "broken installation tree, and re-run this script verbatim."
+ echo
+ echo "$ export CC=gcc"
+ echo "$ export CXX=g++"
+ echo
+ MINICONDA_OS="MacOSX-x86_64"
+fi
+if [ "${MYOS##Linux}" != "${MYOS}" ]
+then
+ echo "Looks like you're on Linux."
+ echo
+ echo "Please make sure you have the developer tools for your OS installed."
+ echo
+ if [ -f /etc/SuSE-release ] && [ `grep --count SUSE /etc/SuSE-release` -gt 0 ]
+ then
+ echo "Looks like you're on an OpenSUSE-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * devel_C_C++"
+ echo " * libopenssl-devel"
+ echo " * libuuid-devel"
+ echo " * zip"
+ echo " * gcc-c++"
+ echo " * chrpath"
+ echo
+ echo "You can accomplish this by executing:"
+ echo
+ echo "$ sudo zypper install -t pattern devel_C_C++"
+ echo "$ sudo zypper install gcc-c++ libopenssl-devel libuuid-devel zip"
+ echo "$ sudo zypper install chrpath"
+ fi
+ if [ -f /etc/lsb-release ] && [ `grep --count buntu /etc/lsb-release` -gt 0 ]
+ then
+ echo "Looks like you're on an Ubuntu-compatible machine."
+ echo
+ echo "You need to have these packages installed:"
+ echo
+ echo " * libssl-dev"
+ echo " * build-essential"
+ echo " * libncurses5"
+ echo " * libncurses5-dev"
+ echo " * zip"
+ echo " * uuid-dev"
+ echo " * chrpath"
+ echo
+ echo "You can accomplish this by executing:"
+ echo
+ echo "$ sudo apt-get install libssl-dev build-essential libncurses5 libncurses5-dev zip uuid-dev chrpath"
+ echo
+ fi
+ echo
+ echo "If you are running on a supercomputer or other module-enabled"
+ echo "system, please make sure that the GNU module has been loaded."
+ echo
+ if [ "${MYOS##x86_64}" != "${MYOS}" ]
+ then
+ MINICONDA_OS="Linux-x86_64"
+ elif [ "${MYOS##i386}" != "${MYOS}" ]
+ then
+ MINICONDA_OS="Linux-x86"
+ else
+ echo "Not sure which type of Linux you're on. Going with x86_64."
+ MINICONDA_OS="Linux-x86_64"
+ fi
+fi
+echo
+echo "If you'd rather not continue, hit Ctrl-C."
+echo
+echo "========================================================================"
+echo
+read -p "[hit enter] "
+echo
+echo "Awesome! Here we go."
+echo
+
+MINICONDA_PKG=Miniconda-${MINICONDA_VERSION}-${MINICONDA_OS}.sh
+
+if type -P wget &>/dev/null
+then
+ echo "Using wget"
+ export GETFILE="wget -nv"
+else
+ echo "Using curl"
+ export GETFILE="curl -sSO"
+fi
+
+echo
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}"
+echo "Downloading ${MINICONDA_URLBASE}/${MINICONDA_PKG}" >> ${LOG_FILE}
+echo
+
+${GETFILE} ${MINICONDA_URLBASE}/${MINICONDA_PKG} || do_exit
+
+echo "Installing the Miniconda python environment."
+
+log_cmd bash ./${MINICONDA_PKG} -b -p $DEST_DIR
+
+# I don't think we need OR want this anymore:
+#export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+
+# This we *do* need.
+export PATH=${DEST_DIR}/bin:$PATH
+
+echo "Installing the necessary packages for yt."
+echo "This may take a while, but don't worry. yt loves you."
+
+declare -a YT_DEPS
+YT_DEPS+=('python')
+YT_DEPS+=('distribute')
+YT_DEPS+=('libpng')
+YT_DEPS+=('freetype')
+YT_DEPS+=('hdf5')
+YT_DEPS+=('numpy')
+YT_DEPS+=('pygments')
+YT_DEPS+=('jinja2')
+YT_DEPS+=('tornado')
+YT_DEPS+=('pyzmq')
+YT_DEPS+=('ipython')
+YT_DEPS+=('sphinx')
+YT_DEPS+=('h5py')
+YT_DEPS+=('matplotlib')
+YT_DEPS+=('cython')
+
+# Here is our dependency list for yt
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/dev
+log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/gpl
+log_cmd conda update --yes conda
+
+echo "Current dependencies: ${YT_DEPS[@]}"
+log_cmd echo "DEPENDENCIES" ${YT_DEPS[@]}
+log_cmd conda install --yes ${YT_DEPS[@]}
+
+echo "Installing mercurial."
+get_ytrecipe mercurial
+
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+ echo "Installing yt as a package."
+ get_ytrecipe yt
+else
+ # We do a source install.
+ YT_DIR="${DEST_DIR}/src/yt-hg"
+ export PNG_DIR=${DEST_DIR}
+ export FTYPE_DIR=${DEST_DIR}
+ export HDF5_DIR=${DEST_DIR}
+ log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
+ pushd ${YT_DIR}
+ echo $DEST_DIR > hdf5.cfg
+ log_cmd python setup.py develop
+ popd
+ log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate
+ log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate
+ log_cmd cp ${YT_DIR}/doc/activate.csh ${DEST_DIR}/bin/activate.csh
+ log_cmd sed -i.bak -e "s,__YT_DIR__,${DEST_DIR}," ${DEST_DIR}/bin/activate.csh
+fi
+
+echo
+echo
+echo "========================================================================"
+echo
+echo "yt and the Conda system are now installed in $DEST_DIR ."
+echo
+if [ $INST_YT_SOURCE -eq 0 ]
+then
+ echo "You must now modify your PATH variable by prepending:"
+ echo
+ echo " $DEST_DIR/bin"
+ echo
+ echo "For example, if you use bash, place something like this at the end"
+ echo "of your ~/.bashrc :"
+ echo
+ echo " export PATH=$DEST_DIR/bin:$PATH"
+else
+ echo "To run from this new installation, use the activate script for this "
+ echo "environment."
+ echo
+ echo " $ source $DEST_DIR/bin/activate"
+ echo
+ echo "This modifies the environment variables YT_DEST, PATH, PYTHONPATH, and"
+ echo "LD_LIBRARY_PATH to match your new yt install. If you use csh, just"
+ echo "append .csh to the above."
+fi
+echo
+echo "To get started with yt, check out the orientation:"
+echo
+echo " http://yt-project.org/doc/orientation/"
+echo
+echo "or just activate your environment and run 'yt serve' to bring up the"
+echo "yt GUI."
+echo
+echo "For support, see the website and join the mailing list:"
+echo
+echo " http://yt-project.org/"
+echo " http://yt-project.org/data/ (Sample data)"
+echo " http://yt-project.org/doc/ (Docs)"
+echo
+echo " http://lists.spacepope.org/listinfo.cgi/yt-users-spacepope.org"
+echo
+echo "========================================================================"
+echo
+echo "Oh, look at me, still talking when there's science to do!"
+echo "Good luck, and email the user list if you run into any problems."
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -327,7 +327,7 @@
icoords = chunk.icoords
i1 = icoords[:,x_dict[self.axis]]
i2 = icoords[:,y_dict[self.axis]]
- ilevel = chunk.ires
+ ilevel = chunk.ires * self.pf.ires_factor
tree.initialize_chunk(i1, i2, ilevel)
def _handle_chunk(self, chunk, fields, tree):
@@ -347,7 +347,7 @@
icoords = chunk.icoords
i1 = icoords[:,x_dict[self.axis]]
i2 = icoords[:,y_dict[self.axis]]
- ilevel = chunk.ires
+ ilevel = chunk.ires * self.pf.ires_factor
tree.add_chunk_to_tree(i1, i2, ilevel, v, w)
def to_pw(self, fields=None, center='c', width=None, axes_unit=None,
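
The two hunks above now scale each chunk's refinement levels by pf.ires_factor before handing them to the quadtree. ires_factor is defined later in this changeset (see the static_output.py hunk below) as log2 of the dataset's refine_by, so the scaling maps dataset levels onto the factor-of-two levels the quadtree expects. A minimal sketch with hypothetical values:

    import numpy as np

    def ires_factor(refine_by):
        # mirrors the property added in static_output.py below:
        # log2(refine_by) must be integral for the remapping to make sense
        o2 = np.log2(refine_by)
        if o2 != int(o2):
            raise RuntimeError("refine_by must be a power of two")
        return int(o2)

    # hypothetical per-cell levels, as returned by chunk.ires
    ilevel = np.array([0, 1, 1, 2])
    print(ilevel * ires_factor(4))   # -> [0 2 2 4]
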
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -70,7 +70,7 @@
"""
if self.start_index is not None:
return self.start_index
- if self.Parent == None:
+ if self.Parent is None:
left = self.LeftEdge - self.pf.domain_left_edge
start_index = left / self.dds
return np.rint(start_index).astype('int64').ravel()
@@ -131,51 +131,6 @@
if self.pf.dimensionality < 2: self.dds[1] = self.pf.domain_right_edge[1] - self.pf.domain_left_edge[1]
if self.pf.dimensionality < 3: self.dds[2] = self.pf.domain_right_edge[2] - self.pf.domain_left_edge[2]
- @property
- def _corners(self):
- return np.array([ # Unroll!
- [self.LeftEdge[0], self.LeftEdge[1], self.LeftEdge[2]],
- [self.RightEdge[0], self.LeftEdge[1], self.LeftEdge[2]],
- [self.RightEdge[0], self.RightEdge[1], self.LeftEdge[2]],
- [self.RightEdge[0], self.RightEdge[1], self.RightEdge[2]],
- [self.LeftEdge[0], self.RightEdge[1], self.RightEdge[2]],
- [self.LeftEdge[0], self.LeftEdge[1], self.RightEdge[2]],
- [self.RightEdge[0], self.LeftEdge[1], self.RightEdge[2]],
- [self.LeftEdge[0], self.RightEdge[1], self.LeftEdge[2]],
- ], dtype='float64')
-
- def _generate_overlap_masks(self, axis, LE, RE):
- """
- Generate a mask that shows which cells overlap with arbitrary arrays
- *LE* and *RE*) of edges, typically grids, along *axis*.
- Use algorithm described at http://www.gamedev.net/reference/articles/article735.asp
-
- """
- x = x_dict[axis]
- y = y_dict[axis]
- cond = self.RightEdge[x] >= LE[:,x]
- cond = np.logical_and(cond, self.LeftEdge[x] <= RE[:,x])
- cond = np.logical_and(cond, self.RightEdge[y] >= LE[:,y])
- cond = np.logical_and(cond, self.LeftEdge[y] <= RE[:,y])
- return cond
-
- def is_in_grid(self, x, y, z) :
- """
- Generate a mask that shows which points in *x*, *y*, and *z*
- fall within this grid's boundaries.
- """
- xcond = np.logical_and(x >= self.LeftEdge[0],
- x < self.RightEdge[0])
- ycond = np.logical_and(y >= self.LeftEdge[1],
- y < self.RightEdge[1])
- zcond = np.logical_and(z >= self.LeftEdge[2],
- z < self.RightEdge[2])
-
- cond = np.logical_and(xcond, ycond)
- cond = np.logical_and(zcond, cond)
-
- return cond
-
def __repr__(self):
return "AMRGridPatch_%04i" % (self.id)
@@ -189,13 +144,8 @@
"""
super(AMRGridPatch, self).clear_data()
- self._del_child_mask()
- self._del_child_indices()
self._setup_dx()
- def check_child_masks(self):
- return self._child_mask, self._child_indices
-
def _prepare_grid(self):
""" Copies all the appropriate attributes from the hierarchy. """
# This is definitely the slowest part of generating the hierarchy
@@ -211,89 +161,12 @@
#self.Time = h.gridTimes[my_ind,0]
self.NumberOfParticles = h.grid_particle_count[my_ind, 0]
- def find_max(self, field):
- """ Returns value, index of maximum value of *field* in this grid. """
- coord1d = (self[field] * self.child_mask).argmax()
- coord = np.unravel_index(coord1d, self[field].shape)
- val = self[field][coord]
- return val, coord
-
- def find_min(self, field):
- """ Returns value, index of minimum value of *field* in this grid. """
- coord1d = (self[field] * self.child_mask).argmin()
- coord = np.unravel_index(coord1d, self[field].shape)
- val = self[field][coord]
- return val, coord
-
def get_position(self, index):
""" Returns center position of an *index*. """
pos = (index + 0.5) * self.dds + self.LeftEdge
return pos
- def clear_all(self):
- """
- Clears all datafields from memory and calls
- :meth:`clear_derived_quantities`.
-
- """
- for key in self.keys():
- del self.field_data[key]
- del self.field_data
- if hasattr(self,"retVal"):
- del self.retVal
- self.field_data = YTFieldData()
- self.clear_derived_quantities()
- del self.child_mask
- del self.child_ind
-
- def _set_child_mask(self, newCM):
- if self._child_mask != None:
- mylog.warning("Overriding child_mask attribute! This is probably unwise!")
- self._child_mask = newCM
-
- def _set_child_indices(self, newCI):
- if self._child_indices != None:
- mylog.warning("Overriding child_indices attribute! This is probably unwise!")
- self._child_indices = newCI
-
- def _get_child_mask(self):
- if self._child_mask == None:
- self.__generate_child_mask()
- return self._child_mask
-
- def _get_child_indices(self):
- if self._child_indices == None:
- self.__generate_child_mask()
- return self._child_indices
-
- def _del_child_indices(self):
- try:
- del self._child_indices
- except AttributeError:
- pass
- self._child_indices = None
-
- def _del_child_mask(self):
- try:
- del self._child_mask
- except AttributeError:
- pass
- self._child_mask = None
-
- def _get_child_index_mask(self):
- if self._child_index_mask is None:
- self.__generate_child_index_mask()
- return self._child_index_mask
-
- def _del_child_index_mask(self):
- try:
- del self._child_index_mask
- except AttributeError:
- pass
- self._child_index_mask = None
-
- #@time_execution
- def __fill_child_mask(self, child, mask, tofill, dlevel = 1):
+ def _fill_child_mask(self, child, mask, tofill, dlevel = 1):
rf = self.pf.refine_by
if dlevel != 1:
rf = rf**dlevel
@@ -306,61 +179,37 @@
startIndex[1]:endIndex[1],
startIndex[2]:endIndex[2]] = tofill
- def __generate_child_mask(self):
+ @property
+ def child_mask(self):
"""
Generates self.child_mask, which is zero where child grids exist (and
thus, where higher resolution data is available).
"""
- self._child_mask = np.ones(self.ActiveDimensions, 'bool')
+ child_mask = np.ones(self.ActiveDimensions, 'bool')
for child in self.Children:
- self.__fill_child_mask(child, self._child_mask, 0)
- if self.OverlappingSiblings is not None:
- for sibling in self.OverlappingSiblings:
- self.__fill_child_mask(sibling, self._child_mask, 0)
-
- self._child_indices = (self._child_mask==0) # bool, possibly redundant
+ self._fill_child_mask(child, child_mask, 0)
+ for sibling in self.OverlappingSiblings or []:
+ self._fill_child_mask(sibling, child_mask, 0)
+ return child_mask
- def __generate_child_index_mask(self):
+ @property
+ def child_indices(self):
+ return (self.child_mask == 0)
+
+ @property
+ def child_index_mask(self):
"""
Generates self.child_index_mask, which is -1 where there is no child,
and otherwise has the ID of the grid that resides there.
"""
- self._child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
+ child_index_mask = np.zeros(self.ActiveDimensions, 'int32') - 1
for child in self.Children:
- self.__fill_child_mask(child, self._child_index_mask,
- child.id)
- if self.OverlappingSiblings is not None:
- for sibling in self.OverlappingSiblings:
- self.__fill_child_mask(sibling, self._child_index_mask,
- sibling.id)
-
- def _get_coords(self):
- if self.__coords == None: self._generate_coords()
- return self.__coords
-
- def _set_coords(self, new_c):
- if self.__coords != None:
- mylog.warning("Overriding coords attribute! This is probably unwise!")
- self.__coords = new_c
-
- def _del_coords(self):
- del self.__coords
- self.__coords = None
-
- def _generate_coords(self):
- """
- Creates self.coords, which is of dimensions (3, ActiveDimensions)
-
- """
- ind = np.indices(self.ActiveDimensions)
- left_shaped = np.reshape(self.LeftEdge, (3, 1, 1, 1))
- self['x'], self['y'], self['z'] = (ind + 0.5) * self.dds + left_shaped
-
- child_mask = property(fget=_get_child_mask, fdel=_del_child_mask)
- child_index_mask = property(fget=_get_child_index_mask, fdel=_del_child_index_mask)
- child_indices = property(fget=_get_child_indices, fdel = _del_child_indices)
+ self._fill_child_mask(child, child_index_mask, child.id)
+ for sibling in self.OverlappingSiblings or []:
+ self._fill_child_mask(sibling, child_index_mask, sibling.id)
+ return child_index_mask
def retrieve_ghost_zones(self, n_zones, fields, all_levels=False,
smoothed=False):
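
The grid_patch.py hunk above replaces the cached _child_mask/_child_indices machinery (explicit getters, setters, and deleters) with read-only properties recomputed on access. A standalone sketch of the pattern, assuming a simplified grid where each "child" is just a precomputed index region rather than a real subgrid:

    import numpy as np

    class Grid:
        def __init__(self, dims, children):
            self.ActiveDimensions = dims
            self.Children = children          # hypothetical: index regions
            self.OverlappingSiblings = None

        def _fill_child_mask(self, child, mask, tofill):
            # the real method maps a child grid's edges into this grid's
            # index space; here a child is already an index region
            mask[child] = tofill

        @property
        def child_mask(self):
            # recomputed on every access: zero wherever finer data exists
            mask = np.ones(self.ActiveDimensions, "bool")
            for child in self.Children:
                self._fill_child_mask(child, mask, 0)
            for sibling in self.OverlappingSiblings or []:
                self._fill_child_mask(sibling, mask, 0)
            return mask

        @property
        def child_indices(self):
            return (self.child_mask == 0)

    g = Grid((4, 4, 4), [np.s_[0:2, 0:2, 0:2]])
    print(g.child_mask.sum(), g.child_indices.sum())   # 56 8

The design trades a small recompute cost for dropping the stale-cache and deletion bookkeeping the old setters had to guard against.
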
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -30,6 +30,16 @@
import yt.geometry.particle_smooth as particle_smooth
from yt.funcs import *
+def cell_count_cache(func):
+ def cc_cache_func(self, dobj):
+ if hash(dobj.selector) != self._last_selector_id:
+ self._cell_count = -1
+ rv = func(self, dobj)
+ self._cell_count = rv.shape[0]
+ self._last_selector_id = hash(dobj.selector)
+ return rv
+ return cc_cache_func
+
class OctreeSubset(YTSelectionContainer):
_spatial = True
_num_ghost_zones = 0
@@ -38,7 +48,7 @@
_con_args = ('base_region', 'domain', 'pf')
_container_fields = ("dx", "dy", "dz")
_domain_offset = 0
- _num_octs = -1
+ _cell_count = -1
def __init__(self, base_region, domain, pf, over_refine_factor = 1):
self._num_zones = 1 << (over_refine_factor)
@@ -161,37 +171,25 @@
vals = np.asfortranarray(vals)
return vals
+ @cell_count_cache
def select_icoords(self, dobj):
- d = self.oct_handler.icoords(self.selector, domain_id = self.domain_id,
- num_octs = self._num_octs)
- self._num_octs = d.shape[0] / (self.nz**3)
- tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
- domain_id = self.domain_id)
- return tr
+ return self.oct_handler.icoords(dobj.selector, domain_id = self.domain_id,
+ num_cells = self._cell_count)
+ @cell_count_cache
def select_fcoords(self, dobj):
- d = self.oct_handler.fcoords(self.selector, domain_id = self.domain_id,
- num_octs = self._num_octs)
- self._num_octs = d.shape[0] / (self.nz**3)
- tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
- domain_id = self.domain_id)
- return tr
+ return self.oct_handler.fcoords(dobj.selector, domain_id = self.domain_id,
+ num_cells = self._cell_count)
+ @cell_count_cache
def select_fwidth(self, dobj):
- d = self.oct_handler.fwidth(self.selector, domain_id = self.domain_id,
- num_octs = self._num_octs)
- self._num_octs = d.shape[0] / (self.nz**3)
- tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 3,
- domain_id = self.domain_id)
- return tr
+ return self.oct_handler.fwidth(dobj.selector, domain_id = self.domain_id,
+ num_cells = self._cell_count)
+ @cell_count_cache
def select_ires(self, dobj):
- d = self.oct_handler.ires(self.selector, domain_id = self.domain_id,
- num_octs = self._num_octs)
- self._num_octs = d.shape[0] / (self.nz**3)
- tr = self.oct_handler.selector_fill(dobj.selector, d, None, 0, 1,
- domain_id = self.domain_id)
- return tr
+ return self.oct_handler.ires(dobj.selector, domain_id = self.domain_id,
+ num_cells = self._cell_count)
def select(self, selector, source, dest, offset):
n = self.oct_handler.selector_fill(selector, source, dest, offset,
@@ -199,11 +197,7 @@
return n
def count(self, selector):
- if hash(selector) == self._last_selector_id:
- if self._last_mask is None: return 0
- return self._last_mask.sum()
- self.select(selector)
- return self.count(selector)
+ return -1
def count_particles(self, selector, x, y, z):
# We don't cache the selector results
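
The cell_count_cache decorator added at the top of octree_subset.py memoizes the selected-cell count per selector: the first select_* call for a new selector passes -1 (count unknown) to the oct handler, and later calls reuse the recorded row count. A minimal sketch, with hypothetical stand-ins for the selector and the oct handler:

    import numpy as np

    def cell_count_cache(func):
        def cc_cache_func(self, dobj):
            if hash(dobj.selector) != self._last_selector_id:
                self._cell_count = -1          # new selector: force a recount
            rv = func(self, dobj)
            self._cell_count = rv.shape[0]     # remember selected-cell count
            self._last_selector_id = hash(dobj.selector)
            return rv
        return cc_cache_func

    class Subset:
        _cell_count = -1
        _last_selector_id = None

        @cell_count_cache
        def select_icoords(self, dobj):
            # hypothetical: pretend the handler returns one row per cell;
            # a real handler can skip counting when _cell_count != -1
            n = 8 if self._cell_count == -1 else self._cell_count
            return np.zeros((n, 3), dtype="int64")

    class DataObject(object):
        selector = "sphere-selector"           # any hashable stands in here

    s = Subset()
    s.select_icoords(DataObject())
    print(s._cell_count)                       # 8, reused by later calls
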
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/data_objects/selection_data_containers.py
--- a/yt/data_objects/selection_data_containers.py
+++ b/yt/data_objects/selection_data_containers.py
@@ -141,50 +141,6 @@
else:
raise KeyError(field)
- def _get_data_from_grid(self, grid, field):
- if self.pf.geometry == "cylindrical":
- if grid.id in self._masks:
- mask = self._masks[grid.id]
- else:
- mask = self._get_cut_mask(grid)
- ts, dts = self._ts[grid.id], self._dts[grid.id]
- else:
- mask = np.logical_and(self._get_cut_mask(grid), grid.child_mask)
- ts, dts = self._ts[grid.id][mask], self._dts[grid.id][mask]
-
- if field == 'dts':
- return dts
- if field == 't':
- return ts
-
- gf = grid[field]
- if not iterable(gf):
- gf = gf * np.ones(grid.child_mask.shape)
- return gf[mask]
-
- def _get_cut_mask(self, grid):
- if self.pf.geometry == "cylindrical":
- _ = clyindrical_ray_trace(self.start_point, self.end_point,
- grid.LeftEdge, grid.RightEdge)
- ts, s, rzt, mask = _
- dts = np.empty(ts.shape, dtype='float64')
- dts[0], dts[1:] = 0.0, ts[1:] - ts[:-1]
- grid['r'], grid['z'], grid['theta'] = rzt[:,0], rzt[:,1], rzt[:,2]
- grid['s'] = s
- else:
- mask = np.zeros(grid.ActiveDimensions, dtype='int')
- dts = np.zeros(grid.ActiveDimensions, dtype='float64')
- ts = np.zeros(grid.ActiveDimensions, dtype='float64')
- VoxelTraversal(mask, ts, dts, grid.LeftEdge, grid.RightEdge,
- grid.dds, self.center, self.vec)
- dts = np.abs(dts)
- ts = np.abs(ts)
- self._dts[grid.id] = dts
- self._ts[grid.id] = ts
- self._masks[grid.id] = mask
- return mask
-
-
class YTSliceBase(YTSelectionContainer2D):
"""
This is a data object corresponding to a slice through the simulation
@@ -249,10 +205,6 @@
else:
raise KeyError(field)
- def _gen_node_name(self):
- return "%s/%s_%s" % \
- (self._top_node, self.axis, self.coord)
-
@property
def _mrep(self):
return MinimalSliceData(self)
@@ -520,156 +472,6 @@
frb = ObliqueFixedResolutionBuffer(self, bounds, resolution)
return frb
-class YTFixedResCuttingPlaneBase(YTSelectionContainer2D):
- """
- The fixed resolution Cutting Plane slices at an oblique angle,
- where we use the *normal* vector at the *center* to define the
- viewing plane. The plane is *width* units wide. The 'up'
- direction is guessed at automatically if not given.
- """
- _top_node = "/FixedResCuttingPlanes"
- _type_name = "fixed_res_cutting"
- _con_args = ('normal', 'center', 'width', 'dims')
- def __init__(self, normal, center, width, dims, pf = None,
- node_name = None, field_parameters = None):
- #
- # Taken from Cutting Plane
- #
- YTSelectionContainer2D.__init__(self, 4, pf, field_parameters)
- self._set_center(center)
- self.width = width
- self.dims = dims
- self.dds = self.width / self.dims
- self.bounds = np.array([0.0,1.0,0.0,1.0])
-
- self.set_field_parameter('center', center)
- # Let's set up our plane equation
- # ax + by + cz + d = 0
- self._norm_vec = normal/np.sqrt(np.dot(normal,normal))
- self._d = -1.0 * np.dot(self._norm_vec, self.center)
- # First we try all three, see which has the best result:
- vecs = np.identity(3)
- _t = np.cross(self._norm_vec, vecs).sum(axis=1)
- ax = _t.argmax()
- self._x_vec = np.cross(vecs[ax,:], self._norm_vec).ravel()
- self._x_vec /= np.sqrt(np.dot(self._x_vec, self._x_vec))
- self._y_vec = np.cross(self._norm_vec, self._x_vec).ravel()
- self._y_vec /= np.sqrt(np.dot(self._y_vec, self._y_vec))
- self._rot_mat = np.array([self._x_vec,self._y_vec,self._norm_vec])
- self._inv_mat = np.linalg.pinv(self._rot_mat)
- self.set_field_parameter('cp_x_vec',self._x_vec)
- self.set_field_parameter('cp_y_vec',self._y_vec)
- self.set_field_parameter('cp_z_vec',self._norm_vec)
-
- # Calculate coordinates of each pixel
- _co = self.dds * \
- (np.mgrid[-self.dims/2 : self.dims/2,
- -self.dims/2 : self.dims/2] + 0.5)
- self._coord = self.center + np.outer(_co[0,:,:], self._x_vec) + \
- np.outer(_co[1,:,:], self._y_vec)
- self._pixelmask = np.ones(self.dims*self.dims, dtype='int8')
-
- if node_name is not False:
- if node_name is True: self._deserialize()
- else: self._deserialize(node_name)
-
- @property
- def normal(self):
- return self._norm_vec
-
- def _get_list_of_grids(self):
- # Just like the Cutting Plane but restrict the grids to be
- # within width/2 of the center.
- vertices = self.hierarchy.gridCorners
- # Shape = (8,3,n_grid)
- D = np.sum(self._norm_vec.reshape((1,3,1)) * vertices, axis=1) + self._d
- valid_grids = np.where(np.logical_not(np.all(D<0,axis=0) |
- np.all(D>0,axis=0) ))[0]
- # Now restrict these grids to a rect. prism that bounds the slice
- sliceCorners = np.array([ \
- self.center + 0.5*self.width * (+self._x_vec + self._y_vec),
- self.center + 0.5*self.width * (+self._x_vec - self._y_vec),
- self.center + 0.5*self.width * (-self._x_vec - self._y_vec),
- self.center + 0.5*self.width * (-self._x_vec + self._y_vec) ])
- sliceLeftEdge = sliceCorners.min(axis=0)
- sliceRightEdge = sliceCorners.max(axis=0)
- # Check for bounding box and grid overlap
- leftOverlap = np.less(self.hierarchy.gridLeftEdge[valid_grids],
- sliceRightEdge).all(axis=1)
- rightOverlap = np.greater(self.hierarchy.gridRightEdge[valid_grids],
- sliceLeftEdge).all(axis=1)
- self._grids = self.hierarchy.grids[valid_grids[
- np.where(leftOverlap & rightOverlap)]]
- self._grids = self._grids[::-1]
-
- def _generate_coords(self):
- self['px'] = self._coord[:,0].ravel()
- self['py'] = self._coord[:,1].ravel()
- self['pz'] = self._coord[:,2].ravel()
- self['pdx'] = self.dds * 0.5
- self['pdy'] = self.dds * 0.5
- #self['pdz'] = self.dds * 0.5
-
- def _get_data_from_grid(self, grid, field):
- if not self.pf.field_info[field].particle_type:
- pointI = self._get_point_indices(grid)
- if len(pointI) == 0: return
- vc = self._calc_vertex_centered_data(grid, field)
- bds = np.array(zip(grid.LeftEdge,
- grid.RightEdge)).ravel()
- interp = TrilinearFieldInterpolator(vc, bds, ['x', 'y', 'z'])
- self[field][pointI] = interp( \
- dict(x=self._coord[pointI,0],
- y=self._coord[pointI,1],
- z=self._coord[pointI,2])).ravel()
-
- # Mark these pixels to speed things up
- self._pixelmask[pointI] = 0
-
- return
- else:
- raise SyntaxError("Making a fixed resolution slice with "
- "particles isn't supported yet.")
-
- def get_data(self, fields):
- """
- Iterates over the list of fields and generates/reads them all.
- """
- self._get_list_of_grids()
- if not self.has_key('pdx'):
- self._generate_coords()
- fields_to_get = ensure_list(fields)
- temp_data = {}
- _size = self.dims * self.dims
- for field in fields_to_get:
- if self.field_data.has_key(field): continue
- if field not in self.hierarchy.field_list:
- if self._generate_field(field):
- continue # A "True" return means we did it
- self[field] = np.zeros(_size, dtype='float64')
- for grid in self._get_grids():
- self._get_data_from_grid(grid, field)
- self[field] = self.comm.mpi_allreduce(\
- self[field], op='sum').reshape([self.dims]*2).transpose()
-
- def _calc_vertex_centered_data(self, grid, field):
- #return grid.retrieve_ghost_zones(1, field, smoothed=False)
- return grid.get_vertex_centered_data(field)
-
- def _get_point_indices(self, grid):
- if self._pixelmask.max() == 0: return []
- k = planar_points_in_volume(self._coord, self._pixelmask,
- grid.LeftEdge, grid.RightEdge,
- grid.child_mask, just_one(grid['dx']))
- return k
-
- def _gen_node_name(self):
- cen_name = ("%s" % (self.center,)).replace(" ","_")[1:-1]
- L_name = ("%s" % self._norm_vec).replace(" ","_")[1:-1]
- return "%s/c%s_L%s" % \
- (self._top_node, cen_name, L_name)
-
-
class YTDiskBase(YTSelectionContainer3D):
"""
By providing a *center*, a *normal*, a *radius* and a *height* we
@@ -687,113 +489,6 @@
self._radius = fix_length(radius, self.pf)
self._d = -1.0 * np.dot(self._norm_vec, self.center)
- def _get_list_of_grids(self):
- H = np.sum(self._norm_vec.reshape((1,3,1)) * self.pf.h.grid_corners,
- axis=1) + self._d
- D = np.sqrt(np.sum((self.pf.h.grid_corners -
- self.center.reshape((1,3,1)))**2.0,axis=1))
- R = np.sqrt(D**2.0-H**2.0)
- self._grids = self.hierarchy.grids[
- ( (np.any(np.abs(H)<self._height,axis=0))
- & (np.any(R<self._radius,axis=0)
- & (np.logical_not((np.all(H>0,axis=0) | (np.all(H<0, axis=0)))) )
- ) ) ]
- self._grids = self.hierarchy.grids
-
- def _is_fully_enclosed(self, grid):
- corners = grid._corners.reshape((8,3,1))
- H = np.sum(self._norm_vec.reshape((1,3,1)) * corners,
- axis=1) + self._d
- D = np.sqrt(np.sum((corners -
- self.center.reshape((1,3,1)))**2.0,axis=1))
- R = np.sqrt(D**2.0-H**2.0)
- return (np.all(np.abs(H) < self._height, axis=0) \
- and np.all(R < self._radius, axis=0))
-
- def _get_cut_mask(self, grid):
- if self._is_fully_enclosed(grid):
- return True
- else:
- h = grid['x'] * self._norm_vec[0] \
- + grid['y'] * self._norm_vec[1] \
- + grid['z'] * self._norm_vec[2] \
- + self._d
- d = np.sqrt(
- (grid['x'] - self.center[0])**2.0
- + (grid['y'] - self.center[1])**2.0
- + (grid['z'] - self.center[2])**2.0
- )
- r = np.sqrt(d**2.0-h**2.0)
- cm = ( (np.abs(h) <= self._height)
- & (r <= self._radius))
- return cm
-
-
-class YTInclinedBoxBase(YTSelectionContainer3D):
- """
- A rectangular prism with arbitrary alignment to the computational
- domain. *origin* is the origin of the box, while *box_vectors* is an
- array of ordering [ax, ijk] that describes the three vectors that
- describe the box. No checks are done to ensure that the box satisfies
- a right-hand rule, but if it doesn't, behavior is undefined.
- """
- _type_name="inclined_box"
- _con_args = ('origin','box_vectors')
-
- def __init__(self, origin, box_vectors, fields=None,
- pf=None, **kwargs):
- self.origin = np.array(origin)
- self.box_vectors = np.array(box_vectors, dtype='float64')
- self.box_lengths = (self.box_vectors**2.0).sum(axis=1)**0.5
- center = origin + 0.5*self.box_vectors.sum(axis=0)
- YTSelectionContainer3D.__init__(self, center, fields, pf, **kwargs)
- self._setup_rotation_parameters()
-
- def _setup_rotation_parameters(self):
- xv = self.box_vectors[0,:]
- yv = self.box_vectors[1,:]
- zv = self.box_vectors[2,:]
- self._x_vec = xv / np.sqrt(np.dot(xv, xv))
- self._y_vec = yv / np.sqrt(np.dot(yv, yv))
- self._z_vec = zv / np.sqrt(np.dot(zv, zv))
- self._rot_mat = np.array([self._x_vec,self._y_vec,self._z_vec])
- self._inv_mat = np.linalg.pinv(self._rot_mat)
-
- def _get_list_of_grids(self):
- if self._grids is not None: return
- GLE = self.pf.h.grid_left_edge
- GRE = self.pf.h.grid_right_edge
- goodI = find_grids_in_inclined_box(self.box_vectors, self.center,
- GLE, GRE)
- cgrids = self.pf.h.grids[goodI.astype('bool')]
- # find_grids_in_inclined_box seems to be broken.
- cgrids = self.pf.h.grids[:]
- grids = []
- for i,grid in enumerate(cgrids):
- v = grid_points_in_volume(self.box_lengths, self.origin,
- self._rot_mat, grid.LeftEdge,
- grid.RightEdge, grid.dds,
- grid.child_mask, 1)
- if v: grids.append(grid)
- self._grids = np.empty(len(grids), dtype='object')
- for gi, g in enumerate(grids): self._grids[gi] = g
-
-
- def _is_fully_enclosed(self, grid):
- # This should be written at some point.
- # We'd rotate all eight corners into the space of the box, then check to
- # see if all are enclosed.
- return False
-
- def _get_cut_mask(self, grid):
- if self._is_fully_enclosed(grid):
- return True
- pm = np.zeros(grid.ActiveDimensions, dtype='int32')
- grid_points_in_volume(self.box_lengths, self.origin,
- self._rot_mat, grid.LeftEdge,
- grid.RightEdge, grid.dds, pm, 0)
- return pm
-
class YTRegionBase(YTSelectionContainer3D):
"""A 3D region of data with an arbitrary center.
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -314,6 +314,13 @@
return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
+ @property
+ def ires_factor(self):
+ o2 = np.log2(self.refine_by)
+ if o2 != int(o2):
+ raise RuntimeError
+ return int(o2)
+
def _reconstruct_pf(*args, **kwargs):
pfs = ParameterFileStore()
pf = pfs.get_pf_hash(*args)
diff -r 580008a74e512d3a14e2ec9e65002fc4bd390e1b -r e62a4472597af9572321605961e446aef5c32dba yt/frontends/artio/_artio_caller.pyx
--- a/yt/frontends/artio/_artio_caller.pyx
+++ b/yt/frontends/artio/_artio_caller.pyx
@@ -1,17 +1,15 @@
-"""
-
-"""
cimport cython
import numpy as np
cimport numpy as np
import sys
from yt.geometry.selection_routines cimport SelectorObject, AlwaysSelector
+from yt.utilities.lib.fp_utils cimport imax
from yt.geometry.oct_container cimport \
- OctreeContainer, OctAllocationContainer, \
SparseOctreeContainer
from yt.geometry.oct_visitors cimport \
- OctVisitorData, oct_visitor_function, Oct
+ OctVisitorData, oct_visitor_function, Oct, \
+ fill_file_indices_oind, fill_file_indices_rind
from yt.geometry.particle_deposit cimport \
ParticleDepositOperation
from libc.stdint cimport int32_t, int64_t
@@ -66,6 +64,9 @@
int artio_fileset_open_grid(artio_fileset_handle *handle)
int artio_fileset_close_grid(artio_fileset_handle *handle)
+ int artio_fileset_has_grid( artio_fileset_handle *handle )
+ int artio_fileset_has_particles( artio_fileset_handle *handle )
+
# selection functions
artio_selection *artio_selection_allocate( artio_fileset_handle *handle )
artio_selection *artio_select_all( artio_fileset_handle *handle )
@@ -136,12 +137,14 @@
cdef int64_t sfc_min, sfc_max
# grid attributes
+ cdef public int has_grid
cdef public int min_level, max_level
cdef public int num_grid_variables
cdef int *num_octs_per_level
cdef float *grid_variables
# particle attributes
+ cdef public int has_particles
cdef public int num_species
cdef int *particle_position_index
cdef int *num_particles_per_species
@@ -178,32 +181,48 @@
if (not self.num_octs_per_level) or (not self.grid_variables) :
raise MemoryError
- status = artio_fileset_open_grid( self.handle )
- check_artio_status(status)
+ if artio_fileset_has_grid(self.handle):
+ status = artio_fileset_open_grid(self.handle)
+ check_artio_status(status)
+ self.has_grid = 1
+ else:
+ self.has_grid = 0
# particle detection
- self.num_species = self.parameters['num_particle_species'][0]
- self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
- if not self.particle_position_index :
- raise MemoryError
- for ispec in range(self.num_species) :
- labels = self.parameters["species_%02d_primary_variable_labels"% (ispec,)]
- try :
- self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
- self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
- self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
- except ValueError :
- raise RuntimeError("Unable to locate position information for particle species", ispec )
+ if ( artio_fileset_has_particles(self.handle) ):
+ status = artio_fileset_open_particles(self.handle)
+ check_artio_status(status)
+ self.has_particles = 1
- self.num_particles_per_species = <int *>malloc(sizeof(int)*self.num_species)
- self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))
- self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))
- if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
- raise MemoryError
+ for v in ["num_particle_species","num_primary_variables","num_secondary_variables"]:
+ if not self.parameters.has_key(v):
+ raise RuntimeError("Unable to locate particle header information in artio header: key=", v)
- status = artio_fileset_open_particles( self.handle )
- check_artio_status(status)
-
+ self.num_species = self.parameters['num_particle_species'][0]
+ self.particle_position_index = <int *>malloc(3*sizeof(int)*self.num_species)
+ if not self.particle_position_index :
+ raise MemoryError
+ for ispec in range(self.num_species) :
+ species_labels = "species_%02d_primary_variable_labels"% (ispec,)
+ if not self.parameters.has_key(species_labels):
+ raise RuntimeError("Unable to locate variable labels for species",ispec)
+
+ labels = self.parameters[species_labels]
+ try :
+ self.particle_position_index[3*ispec+0] = labels.index('POSITION_X')
+ self.particle_position_index[3*ispec+1] = labels.index('POSITION_Y')
+ self.particle_position_index[3*ispec+2] = labels.index('POSITION_Z')
+ except ValueError :
+ raise RuntimeError("Unable to locate position information for particle species", ispec)
+
+ self.num_particles_per_species = <int *>malloc(sizeof(int)*self.num_species)
+ self.primary_variables = <double *>malloc(sizeof(double)*max(self.parameters['num_primary_variables']))
+ self.secondary_variables = <float *>malloc(sizeof(float)*max(self.parameters['num_secondary_variables']))
+ if (not self.num_particles_per_species) or (not self.primary_variables) or (not self.secondary_variables) :
+ raise MemoryError
+ else:
+ self.has_particles = 0
+
def __dealloc__(self) :
if self.num_octs_per_level : free(self.num_octs_per_level)
if self.grid_variables : free(self.grid_variables)
@@ -390,7 +409,6 @@
raise RuntimeError("Field",f,"is not known to ARTIO")
field_order[i] = var_labels.index(f)
- # dhr - cache the entire domain (replace later)
status = artio_grid_cache_sfc_range( self.handle, self.sfc_min, self.sfc_max )
check_artio_status(status)
@@ -530,6 +548,85 @@
artio_fileset_close(handle)
return True
+cdef class ARTIOSFCRangeHandler:
+ cdef public np.int64_t sfc_start
+ cdef public np.int64_t sfc_end
+ cdef public artio_fileset artio_handle
+ cdef public object root_mesh_handler
+ cdef public object oct_count
+ cdef public object octree_handler
+ cdef artio_fileset_handle *handle
+ cdef np.float64_t DLE[3]
+ cdef np.float64_t DRE[3]
+ cdef np.float64_t dds[3]
+ cdef np.int64_t dims[3]
+ cdef public np.int64_t total_octs
+
+ def __init__(self, domain_dimensions, # cells
+ domain_left_edge,
+ domain_right_edge,
+ artio_fileset artio_handle,
+ sfc_start, sfc_end):
+ cdef int i
+ self.sfc_start = sfc_start
+ self.sfc_end = sfc_end
+ self.artio_handle = artio_handle
+ self.root_mesh_handler = None
+ self.octree_handler = None
+ self.handle = artio_handle.handle
+ self.oct_count = None
+ for i in range(3):
+ self.dims[i] = domain_dimensions[i]
+ self.DLE[i] = domain_left_edge[i]
+ self.DRE[i] = domain_right_edge[i]
+ self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ def construct_mesh(self):
+ cdef int status, level
+ cdef np.int64_t sfc, oc
+ cdef double dpos[3]
+ cdef int num_oct_levels
+ cdef int max_level = self.artio_handle.max_level
+ cdef int *num_octs_per_level = <int *>malloc(
+ (max_level + 1)*sizeof(int))
+ cdef ARTIOOctreeContainer octree
+ self.octree_handler = octree = ARTIOOctreeContainer(self)
+ # We want to pre-allocate an array of root pointers. In the future,
+ # this will be pre-determined by the ARTIO library. However, because
+ # realloc plays havoc with our tree searching, we can't utilize an
+ # expanding array at the present time.
+ octree.allocate_domains([], self.sfc_end - self.sfc_start + 1)
+ cdef np.ndarray[np.int64_t, ndim=1] oct_count
+ oct_count = np.zeros(self.sfc_end - self.sfc_start + 1, dtype="int64")
+ status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
+ self.sfc_end)
+ check_artio_status(status)
+ for sfc in range(self.sfc_start, self.sfc_end + 1):
+ status = artio_grid_read_root_cell_begin( self.handle,
+ sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
+ check_artio_status(status)
+ if num_oct_levels > 0:
+ oc = 0
+ for level in range(num_oct_levels):
+ oc += num_octs_per_level[level]
+ self.total_octs += oc
+ oct_count[sfc - self.sfc_start] = oc
+ octree.initialize_local_mesh(oc, num_oct_levels,
+ num_octs_per_level, sfc)
+ status = artio_grid_read_root_cell_end( self.handle )
+ check_artio_status(status)
+ free(num_octs_per_level)
+ self.root_mesh_handler = ARTIORootMeshContainer(self)
+ self.oct_count = oct_count
+
+ def free_mesh(self):
+ self.octree_handler = None
+ self.root_mesh_handler = None
+ self.oct_count = None
+
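
The new ARTIOSFCRangeHandler makes a single pass over its SFC range in construct_mesh, recording how many octs hang off each root cell; the root-mesh container later uses that oct_count array to mask out refined cells. A sketch of the bookkeeping, with hypothetical per-root-cell level counts:

    import numpy as np

    # hypothetical: octs per level beneath each root cell in the range
    octs_per_level = {0: [], 1: [8, 16], 2: [8]}

    sfc_start, sfc_end = 0, 2
    oct_count = np.zeros(sfc_end - sfc_start + 1, dtype="int64")
    total_octs = 0
    for sfc in range(sfc_start, sfc_end + 1):
        oc = sum(octs_per_level[sfc])      # octs over all levels of this cell
        total_octs += oc
        oct_count[sfc - sfc_start] = oc

    # the root mesh keeps only unrefined cells, mirroring the mask()
    # rewrite later in this diff: cells with oct_count > 0 are skipped
    print(total_octs, oct_count == 0)      # 32 [ True False False]
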
def get_coords(artio_fileset handle, np.int64_t s):
cdef int coords[3]
artio_sfc_coords(handle.handle, s, coords)
@@ -563,163 +660,91 @@
# the file again, despite knowing the indexing system already. Because of
# this, we will avoid creating it as long as possible.
- cdef public np.int64_t sfc_start
- cdef public np.int64_t sfc_end
cdef public artio_fileset artio_handle
- cdef Oct **root_octs
- cdef np.int64_t *level_indices
+ cdef np.int64_t level_indices[32]
- def __init__(self, oct_dimensions, domain_left_edge, domain_right_edge,
- int64_t sfc_start, int64_t sfc_end, artio_fileset artio_handle):
- self.artio_handle = artio_handle
- self.sfc_start = sfc_start
- self.sfc_end = sfc_end
+ def __init__(self, ARTIOSFCRangeHandler range_handler):
+ self.artio_handle = range_handler.artio_handle
# Note the final argument is partial_coverage, which indicates whether
# or not an Oct can be partially refined.
- super(ARTIOOctreeContainer, self).__init__(oct_dimensions,
- domain_left_edge, domain_right_edge)
- self.level_indices = NULL
- self._initialize_root_mesh()
+ dims, DLE, DRE = [], [], []
+ for i in range(32):
+ self.level_indices[i] = 0
+ for i in range(3):
+ # range_handler has dims in cells, which is the same as the number
+ # of possible octs. This is because we have a forest of octrees.
+ dims.append(range_handler.dims[i])
+ DLE.append(range_handler.DLE[i])
+ DRE.append(range_handler.DRE[i])
+ super(ARTIOOctreeContainer, self).__init__(dims, DLE, DRE)
+ self.artio_handle = range_handler.artio_handle
+ self.level_offset = 1
+ self.domains = NULL
+ self.root_nodes = NULL
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
- def _initialize_root_mesh(self):
+ cdef void initialize_local_mesh(self, np.int64_t oct_count,
+ int num_oct_levels, int *num_octs_per_level,
+ np.int64_t sfc):
# We actually will not be initializing the root mesh here, we will be
# initializing the entire mesh between sfc_start and sfc_end.
- cdef np.int64_t oct_ind, sfc, nadded, tot_octs, ipos
- cdef np.uint8_t bits
- cdef int status
+ cdef np.int64_t oct_ind, tot_octs, ipos, nadded
+ cdef int i, status, level, num_root, num_octs
+ cdef int num_level_octs
cdef artio_fileset_handle *handle = self.artio_handle.handle
+ cdef int coords[3]
+ cdef int max_level = self.artio_handle.max_level
cdef double dpos[3]
- cdef int coords[3]
- cdef int num_oct_levels, level, i, j
- cdef int max_level = self.artio_handle.max_level
- cdef int *num_octs_per_level = <int *>malloc(
- (max_level + 1)*sizeof(int))
- cdef np.int64_t *tot_octs_per_level = <np.int64_t *>malloc(
- (max_level + 1)*sizeof(np.int64_t))
- self.level_indices = <np.int64_t *>malloc(
- (max_level + 1)*sizeof(np.int64_t))
- for level in range(max_level + 1):
- tot_octs_per_level[level] = 0
- status = artio_grid_cache_sfc_range(handle,
- self.sfc_start, self.sfc_end )
- check_artio_status(status)
- # Now we iterate and create them, level by level.
- # Note that we are doing a bit of magic to figure out how many root
- # nodes we will need at most
- cdef int nmask = self.nn[0] * self.nn[1] * self.nn[2] / 8
- cdef np.uint8_t *mask = <np.uint8_t *> malloc(
- self.nn[0] * self.nn[1] * self.nn[2]) # one bit for each one
- for i in range(nmask): mask[i] = 0
- for sfc in range(self.sfc_start, self.sfc_end + 1):
- status = artio_grid_read_root_cell_begin( handle, sfc,
- dpos, NULL, &num_oct_levels, num_octs_per_level )
- check_artio_status(status)
- artio_sfc_coords(handle, sfc, coords)
- # Now we mask that bit
- for i in range(3):
- coords[i] = <int> (coords[i]/2)
- ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
- bits = ipos % 8
- mask[ <int> (ipos/8) ] |= (1 << bits)
- for level in range(1, num_oct_levels+1):
- # Now we are simply counting so we can pre-allocate arrays.
- # Because the grids have all been cached this should be fine.
- tot_octs_per_level[level] += num_octs_per_level[level-1]
- status = artio_grid_read_root_cell_end( handle )
+ cdef np.float64_t f64pos[3], dds[3]
+ cdef np.ndarray[np.float64_t, ndim=2] pos
+ # NOTE: We do not cache any SFC ranges here, as we should only ever be
+ # called from within a pre-cached operation in the SFC handler.
+
+ # We only allow one root oct.
+ self.append_domain(oct_count)
+ self.domains[self.num_domains - 1].con_id = sfc
+
+ oct_ind = -1
+ ipos = 0
+ for level in range(num_oct_levels):
+ oct_ind = imax(oct_ind, num_octs_per_level[level])
+ self.level_indices[level] = ipos
+ ipos += num_octs_per_level[level]
+ pos = np.empty((oct_ind, 3), dtype="float64")
+
+ # Now we initialize
+ # Note that we also assume we have already started reading the level.
+ ipos = 0
+ for level in range(num_oct_levels):
+ status = artio_grid_read_level_begin(handle, level + 1)
check_artio_status(status)
- cdef np.int64_t num_root = 0
- for i in range(nmask):
- for j in range(8):
- num_root += ((mask[i] >> j) & 1)
- tot_octs_per_level[0] = num_root
- cdef np.int64_t tot = 0
- for i in range(max_level + 1):
- self.level_indices[i] = tot
- tot += tot_octs_per_level[i]
- self.allocate_domains([num_root, tot - num_root], num_root)
- # Now we have everything counted, and we need to create the appropriate
- # number of arrays.
- cdef np.ndarray[np.float64_t, ndim=2] pos
- pos = np.empty((tot, 3), dtype="float64")
- # We do a special case for level 0
- cdef np.float64_t dds[3]
- for i in range(3):
- dds[i] = (self.DRE[i] - self.DLE[i])/self.nn[i]
- for sfc in range(self.sfc_start, self.sfc_end + 1):
- status = artio_grid_read_root_cell_begin( handle, sfc,
- dpos, NULL, &num_oct_levels, num_octs_per_level)
- check_artio_status(status)
- artio_sfc_coords(handle, sfc, coords)
- # Now we check if we have added yet or not
- for i in range(3):
- coords[i] = <int> (coords[i]/2)
- ipos = ((coords[0]*self.nn[1])+coords[1])*self.nn[2]+coords[2]
- bits = ipos % 8
- if ((mask[<int>(ipos/8)] >> bits) & 1) == 1:
- # We add it here
+ for oct_ind in range(num_octs_per_level[level]):
+ status = artio_grid_read_oct(handle, dpos, NULL, NULL)
for i in range(3):
- dpos[i] = self.DLE[i] + (coords[i]+0.5)*dds[i]
- pos[self.level_indices[0], i] = dpos[i]
- mask[<int>(ipos/8)] -= (1 << bits)
- self.level_indices[0] += 1
- # Now we iterate over all the children
- for level in range(1, num_oct_levels+1):
- status = artio_grid_read_level_begin(handle, level)
- check_artio_status(status)
- for oct_ind in range(num_octs_per_level[level - 1]):
- status = artio_grid_read_oct(handle, dpos, NULL, NULL)
- check_artio_status(status)
- for i in range(3):
- pos[self.level_indices[level], i] = dpos[i]
- self.level_indices[level] += 1
- status = artio_grid_read_level_end(handle)
+ pos[oct_ind, i] = dpos[i]
check_artio_status(status)
- status = artio_grid_read_root_cell_end( handle )
+ status = artio_grid_read_level_end(handle)
check_artio_status(status)
- nadded = 0
- cdef np.int64_t si, ei
- si = 0
- # We initialize domain to 1 so that all root mesh octs are viewed as
- # not belonging to this domain. This way we don't get confused with
- # how the different meshes are interfaced, and the root mesh container
- # will own all the root mesh octs. Then, for all octs at higher
- # levels, we use domain == 2.
- cdef int domain = 1
- for level in range(max_level + 1):
- self.level_indices[level] = si
- ei = si + tot_octs_per_level[level]
- if tot_octs_per_level[level] == 0: break
- nadded = self.add(domain, level, pos[si:ei, :])
- if level > 0 and nadded != (ei - si):
- print domain, self.sfc_start, self.sfc_end
- print level, nadded, ei, si, self.max_root,
- print self.level_indices[level]
- print pos[si:ei,:]
- print nadded, (ei - si), tot_octs_per_level[0]
+ nadded = self.add(self.num_domains, level, pos[:num_octs_per_level[level],:])
+ if nadded != num_octs_per_level[level]:
raise RuntimeError
- si = ei
- domain = 2
- artio_grid_clear_sfc_cache(handle)
- free(mask)
- free(num_octs_per_level)
- free(tot_octs_per_level)
@cython.boundscheck(False)
@cython.wraparound(False)
@cython.cdivision(True)
def fill_sfc(self,
- np.ndarray[np.uint8_t, ndim=1] levels,
- np.ndarray[np.uint8_t, ndim=1] cell_inds,
- np.ndarray[np.int64_t, ndim=1] file_inds,
- field_indices, dest_fields):
+ np.ndarray[np.uint8_t, ndim=1] levels,
+ np.ndarray[np.uint8_t, ndim=1] cell_inds,
+ np.ndarray[np.int64_t, ndim=1] file_inds,
+ np.ndarray[np.int64_t, ndim=1] domain_counts,
+ field_indices, dest_fields):
cdef np.ndarray[np.float32_t, ndim=2] source
cdef np.ndarray[np.float64_t, ndim=1] dest
cdef int n, status, i, di, num_oct_levels, nf, ngv, max_level
- cdef int j, oct_ind, level
- cdef np.int64_t sfc
+ cdef int level, j, oct_ind, si
+ cdef np.int64_t sfc, ipos
cdef np.float64_t val
cdef artio_fileset_handle *handle = self.artio_handle.handle
cdef double dpos[3]
@@ -736,61 +761,70 @@
nf * sizeof(int))
cdef np.float32_t **field_vals = <np.float32_t**> malloc(
nf * sizeof(np.float32_t*))
- cdef np.int64_t *local_ind = <np.int64_t *> malloc(
- (max_level + 1) * sizeof(np.int64_t))
- for i in range(max_level + 1):
- # This will help us keep track of where we are in the flattened
- # array, which will be indexed by file_ind.
- local_ind[i] = self.level_indices[i]
source_arrays = []
+ ipos = -1
+ for i in range(self.num_domains):
+ ipos = imax(ipos, self.domains[i].n)
for i in range(nf):
field_ind[i] = field_indices[i]
- # This zeros should be an empty once we handle the root grid
- source = np.zeros((self.nocts, 8), dtype="float32")
+ # Note that we subtract one, because we're not using the root mesh.
+ source = np.zeros((ipos, 8), dtype="float32")
source_arrays.append(source)
field_vals[i] = <np.float32_t*> source.data
+ cdef np.int64_t level_position[32], lp
# First we need to walk the mesh in the file. Then we fill in the dest
# location based on the file index.
- status = artio_grid_cache_sfc_range(handle,
- self.sfc_start, self.sfc_end )
- check_artio_status(status)
- for sfc in range(self.sfc_start, self.sfc_end + 1):
+ # A few ways this could be improved:
+ # * Create a new visitor function that actually queried the data,
+ # rather than our somewhat hokey double-loop over SFC arrays.
+ # * Go to pointers for the dest arrays.
+ # * Enable preloading during mesh initialization
+ # * Calculate domain indices on the fly rather than with a
+ # double-loop to calculate domain_counts
+ cdef np.int64_t offset = 0
+ for si in range(self.num_domains):
+ sfc = self.domains[si].con_id
status = artio_grid_read_root_cell_begin( handle, sfc,
dpos, NULL, &num_oct_levels, num_octs_per_level)
check_artio_status(status)
- for level in range(1, num_oct_levels+1):
- status = artio_grid_read_level_begin(handle, level)
+ lp = 0
+ for level in range(num_oct_levels):
+ status = artio_grid_read_level_begin(handle, level + 1)
check_artio_status(status)
- for oct_ind in range(num_octs_per_level[level - 1]):
+ level_position[level] = lp
+ for oct_ind in range(num_octs_per_level[level]):
status = artio_grid_read_oct(handle, dpos, grid_variables, NULL)
check_artio_status(status)
for j in range(8):
for i in range(nf):
- field_vals[i][local_ind[level] * 8 + j] = \
- grid_variables[ngv * j + field_ind[i]]
- local_ind[level] += 1
+ field_vals[i][(oct_ind + lp)*8+j] = \
+ grid_variables[ngv*j+field_ind[i]]
status = artio_grid_read_level_end(handle)
check_artio_status(status)
+ lp += num_octs_per_level[level]
status = artio_grid_read_root_cell_end( handle )
check_artio_status(status)
- # Now we have all our sources.
- artio_grid_clear_sfc_cache(handle)
- for j in range(nf):
- dest = dest_fields[j]
- source = source_arrays[j]
- for i in range(levels.shape[0]):
- if levels[i] == 0: continue
- oct_ind = self.level_indices[levels[i]]
- dest[i] = source[file_inds[i] + oct_ind, cell_inds[i]]
+ # Now we have all our sources.
+ for j in range(nf):
+ dest = dest_fields[j]
+ source = source_arrays[j]
+ for i in range(domain_counts[si]):
+ level = levels[i + offset]
+ oct_ind = file_inds[i + offset] + level_position[level]
+ dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
+ # Now, we offset by the actual number filled here.
+ offset += domain_counts[si]
free(field_ind)
free(field_vals)
- free(local_ind)
free(grid_variables)
free(num_octs_per_level)
def fill_sfc_particles(self, fields):
- rv = read_sfc_particles(self.artio_handle,
- self.sfc_start, self.sfc_end,
+ # This handles not getting particles for refined sfc values.
+ cdef np.int64_t sfc_start, sfc_end
+ sfc_start = self.domains[0].con_id
+ sfc_end = self.domains[self.num_domains - 1].con_id
+ rv = read_sfc_particles(self.artio_handle, sfc_start, sfc_end,
0, fields)
return rv
@@ -952,11 +986,11 @@
status = artio_particle_read_root_cell_end( handle )
check_artio_status(status)
- status = artio_particle_clear_sfc_cache(handle)
- check_artio_status(status)
+ #status = artio_particle_clear_sfc_cache(handle)
+ #check_artio_status(status)
- status = artio_grid_clear_sfc_cache(handle)
- check_artio_status(status)
+ #status = artio_grid_clear_sfc_cache(handle)
+ #check_artio_status(status)
free(num_octs_per_level)
free(num_particles_per_species)
@@ -976,22 +1010,23 @@
cdef artio_fileset_handle *handle
cdef np.uint64_t sfc_start
cdef np.uint64_t sfc_end
+ cdef public object _last_mask
+ cdef public object _last_selector_id
+ cdef ARTIOSFCRangeHandler range_handler
- def __init__(self, domain_dimensions, # cells
- domain_left_edge,
- domain_right_edge,
- artio_fileset artio_handle,
- sfc_start, sfc_end):
- self.artio_handle = artio_handle
- self.handle = artio_handle.handle
+ def __init__(self, ARTIOSFCRangeHandler range_handler):
cdef int i
for i in range(3):
- self.dims[i] = domain_dimensions[i]
- self.DLE[i] = domain_left_edge[i]
- self.DRE[i] = domain_right_edge[i]
- self.dds[i] = (self.DRE[i] - self.DLE[i])/self.dims[i]
- self.sfc_start = sfc_start
- self.sfc_end = sfc_end
+ self.DLE[i] = range_handler.DLE[i]
+ self.DRE[i] = range_handler.DRE[i]
+ self.dims[i] = range_handler.dims[i]
+ self.dds[i] = range_handler.dds[i]
+ self.handle = range_handler.handle
+ self.artio_handle = range_handler.artio_handle
+ self._last_mask = self._last_selector_id = None
+ self.sfc_start = range_handler.sfc_start
+ self.sfc_end = range_handler.sfc_end
+ self.range_handler = range_handler
@cython.cdivision(True)
cdef np.int64_t pos_to_sfc(self, np.float64_t pos[3]) nogil:
@@ -1019,17 +1054,16 @@
cdef int i
return self.mask(selector).sum()
- def icoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+ def icoords(self, SelectorObject selector, np.int64_t num_cells = -1,
int domain_id = -1):
# Note that num_octs does not have to equal sfc_end - sfc_start + 1.
cdef np.int64_t sfc
cdef int acoords[3], i
- # We call it num_octs, but it's really num_cells.
cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
mask = self.mask(selector)
- num_octs = mask.sum()
+ num_cells = mask.sum()
cdef np.ndarray[np.int64_t, ndim=2] coords
- coords = np.empty((num_octs, 3), dtype="int64")
+ coords = np.empty((num_cells, 3), dtype="int64")
cdef int filled = 0
for sfc in range(self.sfc_start, self.sfc_end + 1):
if mask[sfc - self.sfc_start] == 0: continue
@@ -1042,18 +1076,17 @@
filled += 1
return coords
- def fcoords(self, SelectorObject selector, np.int64_t num_octs = -1,
+ def fcoords(self, SelectorObject selector, np.int64_t num_cells = -1,
int domain_id = -1):
- # Note that num_octs does not have to equal sfc_end - sfc_start + 1.
+ # Note that num_cells does not have to equal sfc_end - sfc_start + 1.
cdef np.int64_t sfc
cdef np.float64_t pos[3]
cdef int acoords[3], i
- # We call it num_octs, but it's really num_cells.
cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
mask = self.mask(selector)
- num_octs = mask.sum()
+ num_cells = mask.sum()
cdef np.ndarray[np.float64_t, ndim=2] coords
- coords = np.empty((num_octs, 3), dtype="float64")
+ coords = np.empty((num_cells, 3), dtype="float64")
cdef int filled = 0
for sfc in range(self.sfc_start, self.sfc_end + 1):
if mask[sfc - self.sfc_start] == 0: continue
@@ -1066,27 +1099,30 @@
filled += 1
return coords
- def fwidth(self, SelectorObject selector, np.int64_t num_octs = -1,
+ def fwidth(self, SelectorObject selector, np.int64_t num_cells = -1,
int domain_id = -1):
cdef int i
cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
mask = self.mask(selector)
- num_octs = mask.sum()
+ num_cells = mask.sum()
cdef np.ndarray[np.float64_t, ndim=2] width
- width = np.zeros((num_octs, 3), dtype="float64")
+ width = np.zeros((num_cells, 3), dtype="float64")
for i in range(3):
width[:,i] = self.dds[i]
return width
- def ires(self, SelectorObject selector, np.int64_t num_octs = -1,
+ def ires(self, SelectorObject selector, np.int64_t num_cells = -1,
int domain_id = -1):
cdef np.ndarray[np.uint8_t, ndim=1, cast=True] mask
mask = self.mask(selector)
- num_octs = mask.sum()
+ num_cells = mask.sum()
cdef np.ndarray[np.int64_t, ndim=1] res
- res = np.zeros(num_octs, dtype="int64")
+ res = np.zeros(num_cells, dtype="int64")
return res
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
def selector_fill(self, SelectorObject selector,
np.ndarray source,
np.ndarray dest = None,
@@ -1131,42 +1167,30 @@
return dest
return filled
- def mask(self, SelectorObject selector, np.int64_t num_octs = -1):
- cdef int i, status
- cdef double dpos[3]
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ @cython.cdivision(True)
+ def mask(self, SelectorObject selector, np.int64_t num_cells = -1):
+ cdef int i
cdef np.float64_t pos[3]
- if num_octs == -1:
+ cdef np.int64_t sfc
+ cdef np.ndarray[np.int64_t, ndim=1] oct_count
+ if self._last_selector_id == hash(selector):
+ return self._last_mask
+ if num_cells == -1:
# We need to count, but this process will only occur one time,
- # since num_octs will later be cached.
- num_octs = self.sfc_end - self.sfc_start + 1
- assert(num_octs == (self.sfc_end - self.sfc_start + 1))
- cdef np.ndarray[np.uint8_t, ndim=1] mask
- cdef int num_oct_levels
- cdef int max_level = self.artio_handle.max_level
- cdef int *num_octs_per_level = <int *>malloc(
- (max_level + 1)*sizeof(int))
- mask = np.zeros((num_octs), dtype="uint8")
- status = artio_grid_cache_sfc_range(self.handle, self.sfc_start,
- self.sfc_end)
- check_artio_status(status)
+ # since num_cells will later be cached.
+ num_cells = self.sfc_end - self.sfc_start + 1
+ mask = np.zeros((num_cells), dtype="uint8")
+ oct_count = self.range_handler.oct_count
for sfc in range(self.sfc_start, self.sfc_end + 1):
- # We check if the SFC is in our selector, and if so, we copy
- # Note that because we initialize to zeros, we can just continue if
- # it's not included.
+ if oct_count[sfc - self.sfc_start] > 0: continue
self.sfc_to_pos(sfc, pos)
if selector.select_cell(pos, self.dds) == 0: continue
- # Now we just need to check if the cells are refined.
- status = artio_grid_read_root_cell_begin( self.handle,
- sfc, dpos, NULL, &num_oct_levels, num_octs_per_level)
- check_artio_status(status)
- status = artio_grid_read_root_cell_end( self.handle )
- check_artio_status(status)
- # If refined, we skip
- if num_oct_levels > 0: continue
mask[sfc - self.sfc_start] = 1
- artio_grid_clear_sfc_cache(self.handle)
- free(num_octs_per_level)
- return mask.astype("bool")
+ self._last_mask = mask.astype("bool")
+ self._last_selector_id = hash(selector)
+ return self._last_mask
def fill_sfc_particles(self, fields):
rv = read_sfc_particles(self.artio_handle,
@@ -1223,8 +1247,8 @@
status = artio_grid_read_root_cell_end( handle )
check_artio_status(status)
# Now we have all our sources.
- status = artio_grid_clear_sfc_cache(handle)
- check_artio_status(status)
+ #status = artio_grid_clear_sfc_cache(handle)
+ #check_artio_status(status)
free(field_ind)
free(field_vals)
free(grid_variables)
This diff is so big that we needed to truncate the remainder.
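
The fill_sfc rewrite above replaces the global level_indices bookkeeping with per-domain offsets: cells arrive grouped by SFC domain (domain_counts), and within each domain a file index plus the level's starting position locates the source oct. A NumPy sketch of that double loop, with all inputs hypothetical:

    import numpy as np

    # hypothetical flattened cell records, grouped by SFC domain
    levels        = np.array([0, 0, 1, 0], dtype="uint8")
    file_inds     = np.array([0, 1, 0, 0], dtype="int64")
    cell_inds     = np.array([3, 5, 2, 7], dtype="uint8")
    domain_counts = np.array([3, 1], dtype="int64")   # cells per domain

    # per-domain source[oct, cell] arrays and per-level oct offsets
    sources        = [np.arange(16.0).reshape(2, 8),
                      np.arange(8.0).reshape(1, 8)]
    level_position = [[0, 1], [0]]         # oct offset of each level

    dest = np.empty(levels.shape[0], dtype="float64")
    offset = 0
    for si, source in enumerate(sources):
        for i in range(domain_counts[si]):
            level = levels[i + offset]
            oct_ind = file_inds[i + offset] + level_position[si][level]
            dest[i + offset] = source[oct_ind, cell_inds[i + offset]]
        offset += domain_counts[si]        # advance past this domain's cells
    print(dest)                            # [ 3. 13. 10.  7.]
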
https://bitbucket.org/yt_analysis/yt-3.0/commits/71840a50b446/
Changeset: 71840a50b446
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 00:40:28
Summary: Merging again.
Affected #: 10 files
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -132,9 +132,9 @@
outbase: str
This is where the out*list files that Rockstar makes should be
placed. Default is 'rockstar_halos'.
- dm_type: 1
- In order to exclude stars and other particle types, define
- the dm_type. Default is 1, as Enzo has the DM particle type=1.
+ particle_type: str
+ This is the "particle type" that can be found in the data. This can be
+ a filtered particle or an inherent type.
force_res: float
This parameter specifies the force resolution that Rockstar uses
in units of Mpc/h.
@@ -144,23 +144,17 @@
longest) in the time series:
``pf_last.h.get_smallest_dx() * pf_last['mpch']``.
total_particles : int
- If supplied, this is a pre-calculated total number of dark matter
- particles present in the simulation. For example, this is useful
- when analyzing a series of snapshots where the number of dark
- matter particles should not change and this will save some disk
- access time. If left unspecified, it will
- be calculated automatically. Default: ``None``.
- dm_only : boolean
- If set to ``True``, it will be assumed that there are only dark
- matter particles present in the simulation. This can save analysis
- time if this is indeed the case. Default: ``False``.
- hires_dm_mass : float
- If supplied, use only the highest resolution dark matter
- particles, with a mass less than (1.1*hires_dm_mass), in units
- of ParticleMassMsun. This is useful for multi-dm-mass
- simulations. Note that this will only give sensible results for
- halos that are not "polluted" by lower resolution
- particles. Default: ``None``.
+ If supplied, this is a pre-calculated total number of particles present
+ in the simulation. For example, this is useful when analyzing a series
+ of snapshots where the number of dark matter particles should not
+ change and this will save some disk access time. If left unspecified,
+ it will be calculated automatically. Default: ``None``.
+ particle_mass : float
+ If supplied, use this as the particle mass passed to Rockstar.
+ Otherwise, the smallest particle mass will be identified and calculated
+ internally. This is useful for multi-dm-mass simulations. Note that
+ this will only give sensible results for halos that are not "polluted"
+ by lower resolution particles. Default: ``None``.
Returns
-------
@@ -183,9 +177,9 @@
rh.run()
"""
def __init__(self, ts, num_readers = 1, num_writers = None,
- outbase="rockstar_halos", dm_type=1,
+ outbase="rockstar_halos", particle_type="all",
force_res=None, total_particles=None, dm_only=False,
- hires_dm_mass=None):
+ particle_mass=None):
mylog.warning("The citation for the Rockstar halo finder can be found at")
mylog.warning("http://adsabs.harvard.edu/abs/2013ApJ...762..109B")
ParallelAnalysisInterface.__init__(self)
@@ -204,7 +198,7 @@
if not isinstance(ts, TimeSeriesData):
ts = TimeSeriesData([ts])
self.ts = ts
- self.dm_type = dm_type
+ self.particle_type = particle_type
self.outbase = outbase
if force_res is None:
tpf = ts[-1] # Cache a reference
@@ -215,7 +209,7 @@
self.force_res = force_res
self.total_particles = total_particles
self.dm_only = dm_only
- self.hires_dm_mass = hires_dm_mass
+ self.particle_mass = particle_mass
# Setup pool and workgroups.
self.pool, self.workgroup = self.runner.setup_pool()
p = self._setup_parameters(ts)
@@ -226,63 +220,30 @@
def _setup_parameters(self, ts):
if self.workgroup.name != "readers": return None
tpf = ts[0]
+ ptype = self.particle_type
- def _particle_count(field, data):
- if data.NumberOfParticles == 0: return 0
- try:
- data["particle_type"]
- has_particle_type=True
- except KeyError:
- has_particle_type=False
-
- if (self.dm_only or (not has_particle_type)):
- if self.hires_dm_mass is None:
- return np.prod(data["particle_position_x"].shape)
- else:
- return (data['ParticleMassMsun'] < self.hires_dm_mass*1.1).sum()
- elif has_particle_type:
- if self.hires_dm_mass is None:
- return (data["particle_type"]==self.dm_type).sum()
- else:
- return ( (data["particle_type"]==self.dm_type) &
- (data['ParticleMassMsun'] < self.hires_dm_mass*1.1) ).sum()
- else:
- raise RuntimeError() # should never get here
-
- add_field("particle_count", function=_particle_count,
- not_in_all=True, particle_type=True)
dd = tpf.h.all_data()
# Get DM particle mass.
all_fields = set(tpf.h.derived_field_list + tpf.h.field_list)
has_particle_type = ("particle_type" in all_fields)
- if self.hires_dm_mass is None:
- for g in tpf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
-
- if (self.dm_only or (not has_particle_type)):
- iddm = Ellipsis
- elif has_particle_type:
- iddm = g["particle_type"] == self.dm_type
- else:
- iddm = Ellipsis # should never get here
-
- particle_mass = g['ParticleMassMsun'][iddm][0] / tpf.hubble_constant
- break
- else:
- particle_mass = self.hires_dm_mass / tpf.hubble_constant
+ particle_mass = self.particle_mass
+ if particle_mass is None:
+ particle_mass = dd.quantities["Extrema"](
+ (ptype, "ParticleMassMsun"), non_zero = True)[0][0]
+ particle_mass /= tpf.hubble_constant
p = {}
if self.total_particles is None:
# Get total_particles in parallel.
- p['total_particles'] = int(dd.quantities['TotalQuantity']('particle_count')[0])
+ tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
+ p['total_particles'] = int(tp)
p['left_edge'] = tpf.domain_left_edge
p['right_edge'] = tpf.domain_right_edge
p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
- p['particle_mass'] = particle_mass
+ p['particle_mass'] = self.particle_mass = particle_mass
return p
-
def __del__(self):
try:
self.pool.free_all()
@@ -306,7 +267,7 @@
(server_address, port))
self.port = str(self.port)
- def run(self, block_ratio = 1,**kwargs):
+ def run(self, block_ratio = 1, callbacks = None):
"""
"""
@@ -315,7 +276,8 @@
self._get_hosts()
self.handler.setup_rockstar(self.server_address, self.port,
len(self.ts), self.total_particles,
- self.dm_type,
+ self.particle_type,
+ particle_mass = self.particle_mass,
parallel = self.comm.size > 1,
num_readers = self.num_readers,
num_writers = self.num_writers,
@@ -323,10 +285,7 @@
block_ratio = block_ratio,
outbase = self.outbase,
force_res = self.force_res,
- particle_mass = float(self.particle_mass),
- dm_only = int(self.dm_only),
- hires_only = (self.hires_dm_mass is not None),
- **kwargs)
+ callbacks = callbacks)
# Make the directory to store the halo lists in.
if not self.outbase:
self.outbase = os.getcwd()
@@ -357,4 +316,4 @@
Reads in the out_0.list file and generates RockstarHaloList
and RockstarHalo objects.
"""
- return RockstarHaloList(self.pf,self.outbase+'/%s'%file_name)
+ return RockstarHaloList(self.ts[0], self.outbase+'/%s'%file_name)
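Taken together, these rockstar.py changes replace the dm_type / dm_only / hires_dm_mass trio with a single (particle_type, particle_mass) pair. A minimal sketch of the new entry point follows; the glob pattern is illustrative, and particle_mass=None relies on the Extrema-based inference added above:

    from yt.mods import *
    from yt.analysis_modules.halo_finding.rockstar.api import RockstarHaloFinder

    ts = TimeSeriesData.from_filenames("DD????/DD????")  # illustrative outputs
    rh = RockstarHaloFinder(ts, num_readers=1, num_writers=1,
                            particle_type="all",   # replaces dm_type
                            particle_mass=None)    # inferred via Extrema if None
    rh.run()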
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar_interface.pyx
@@ -18,6 +18,8 @@
cimport numpy as np
cimport cython
from libc.stdlib cimport malloc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ parallel_objects
from yt.config import ytcfg
@@ -26,9 +28,27 @@
np.int64_t id
float pos[6]
+ctypedef struct particleflat:
+ np.int64_t id
+ float pos_x
+ float pos_y
+ float pos_z
+ float vel_x
+ float vel_y
+ float vel_z
+
+cdef import from "halo.h":
+ struct halo:
+ np.int64_t id
+ float pos[6], corevel[3], bulkvel[3]
+ float m, r, child_r, mgrav, vmax, rvmax, rs, vrms, J[3], energy, spin
+ np.int64_t num_p, num_child_particles, p_start, desc, flags, n_core
+ float min_pos_err, min_vel_err, min_bulkvel_err
+
cdef import from "io_generic.h":
ctypedef void (*LPG) (char *filename, particle **p, np.int64_t *num_p)
- void set_load_particles_generic(LPG func)
+ ctypedef void (*AHG) (halo *h, particle *hp)
+ void set_load_particles_generic(LPG func, AHG afunc)
cdef import from "rockstar.h":
void rockstar(float *bounds, np.int64_t manual_subs)
@@ -139,49 +159,43 @@
# Forward declare
cdef class RockstarInterface
-cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p) with gil:
+cdef void rh_analyze_halo(halo *h, particle *hp):
+ # I don't know why, but sometimes we get halos with 0 particles.
+ if h.num_p == 0: return
+ cdef particleflat[:] pslice
+ pslice = <particleflat[:h.num_p]> (<particleflat *>hp)
+ parray = np.asarray(pslice)
+ for cb in rh.callbacks:
+ cb(rh.pf, parray)
+ # This is where we call our functions
+
+cdef void rh_read_particles(char *filename, particle **p, np.int64_t *num_p):
global SCALE_NOW
cdef np.float64_t conv[6], left_edge[6]
cdef np.ndarray[np.int64_t, ndim=1] arri
cdef np.ndarray[np.float64_t, ndim=1] arr
cdef unsigned long long pi,fi,i
- pf = rh.tsl.next()
- print 'reading from particle filename %s: %s'%(filename,pf.basename)
+ cdef np.int64_t local_parts = 0
+ pf = rh.pf = rh.tsl.next()
block = int(str(filename).rsplit(".")[-1])
n = rh.block_ratio
SCALE_NOW = 1.0/(pf.current_redshift+1.0)
# Now we want to grab data from only a subset of the grids for each reader.
all_fields = set(pf.h.derived_field_list + pf.h.field_list)
- has_particle_type = ("particle_type" in all_fields)
# First we need to find out how many this reader is going to read in
# if the number of readers > 1.
+ dd = pf.h.all_data()
+
if NUM_BLOCKS > 1:
local_parts = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
- if (rh.dm_only or (not has_particle_type)):
- if rh.hires_only:
- iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
- else:
- iddm = Ellipsis
- elif has_particle_type:
- if rh.hires_only:
- iddm = ( (g["particle_type"]==rh.dm_type) &
- (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )
- else:
- iddm = g["particle_type"] == rh.dm_type
- else:
- iddm = Ellipsis # should never get here
- arri = g["particle_index"].astype("int64")
- arri = arri[iddm] #pick only DM
- local_parts += arri.size
+ for chunk in parallel_objects(
+ dd.chunks([(rh.particle_type, "particle_ones")], "io")):
+ local_parts += chunk[rh.particle_type, "particle_ones"].sum()
else:
local_parts = TOTAL_PARTICLES
- #print "local_parts", local_parts
-
p[0] = <particle *> malloc(sizeof(particle) * local_parts)
conv[0] = conv[1] = conv[2] = pf["mpchcm"]
@@ -191,33 +205,22 @@
left_edge[2] = pf.domain_left_edge[2]
left_edge[3] = left_edge[4] = left_edge[5] = 0.0
pi = 0
- for g in pf.h._get_objs("grids"):
- if g.NumberOfParticles == 0: continue
- if (rh.dm_only or (not has_particle_type)):
- if rh.hires_only:
- iddm = (g['ParticleMassMsun'] < PARTICLE_MASS*1.1)
- else:
- iddm = Ellipsis
- elif has_particle_type:
- if rh.hires_only:
- iddm = ( (g["particle_type"]==rh.dm_type) &
- (g['ParticleMassMsun'] < PARTICLE_MASS*1.1) )
- else:
- iddm = g["particle_type"] == rh.dm_type
- else:
- iddm = Ellipsis # should never get here
- arri = g["particle_index"].astype("int64")
- arri = arri[iddm] #pick only DM
+ fields = [ (rh.particle_type, f) for f in
+ ["particle_position_%s" % ax for ax in 'xyz'] +
+ ["particle_velocity_%s" % ax for ax in 'xyz'] +
+ ["particle_index"]]
+ for chunk in parallel_objects(dd.chunks(fields, "io")):
+ arri = np.asarray(chunk[rh.particle_type, "particle_index"],
+ dtype="int64")
npart = arri.size
for i in range(npart):
- p[0][i+pi].id = arri[i]
+ p[0][i+pi].id = <np.int64_t> arri[i]
fi = 0
for field in ["particle_position_x", "particle_position_y",
"particle_position_z",
"particle_velocity_x", "particle_velocity_y",
"particle_velocity_z"]:
- arr = g[field].astype("float64")
- arr = arr[iddm] #pick DM
+ arr = chunk[rh.particle_type, field].astype("float64")
for i in range(npart):
p[0][i+pi].pos[fi] = (arr[i]-left_edge[fi])*conv[fi]
fi += 1
@@ -229,13 +232,13 @@
cdef public object data_source
cdef public object ts
cdef public object tsl
+ cdef public object pf
cdef int rank
cdef int size
cdef public int block_ratio
- cdef public int dm_type
+ cdef public object particle_type
cdef public int total_particles
- cdef public int dm_only
- cdef public int hires_only
+ cdef public object callbacks
def __cinit__(self, ts):
self.ts = ts
@@ -243,14 +246,14 @@
def setup_rockstar(self, char *server_address, char *server_port,
int num_snaps, np.int64_t total_particles,
- int dm_type,
+ particle_type,
np.float64_t particle_mass,
int parallel = False, int num_readers = 1,
int num_writers = 1,
int writing_port = -1, int block_ratio = 1,
int periodic = 1, force_res=None,
int min_halo_size = 25, outbase = "None",
- int dm_only = 0, int hires_only = False):
+ callbacks = None):
global PARALLEL_IO, PARALLEL_IO_SERVER_ADDRESS, PARALLEL_IO_SERVER_PORT
global FILENAME, FILE_FORMAT, NUM_SNAPS, STARTING_SNAP, h0, Ol, Om
global BOX_SIZE, PERIODIC, PARTICLE_MASS, NUM_BLOCKS, NUM_READERS
@@ -281,14 +284,15 @@
MIN_HALO_OUTPUT_SIZE=min_halo_size
TOTAL_PARTICLES = total_particles
self.block_ratio = block_ratio
- self.dm_only = dm_only
- self.hires_only = hires_only
+ self.particle_type = particle_type
tpf = self.ts[0]
h0 = tpf.hubble_constant
Ol = tpf.omega_lambda
Om = tpf.omega_matter
SCALE_NOW = 1.0/(tpf.current_redshift+1.0)
+ if callbacks is None: callbacks = []
+ self.callbacks = callbacks
if not outbase =='None'.decode('UTF-8'):
#output directory. since we can't change the output filenames
#workaround is to make a new directory
@@ -300,9 +304,9 @@
tpf.domain_left_edge[0]) * tpf['mpchcm']
setup_config()
rh = self
- rh.dm_type = dm_type
cdef LPG func = rh_read_particles
- set_load_particles_generic(func)
+ cdef AHG afunc = rh_analyze_halo
+ set_load_particles_generic(func, afunc)
def call_rockstar(self):
read_particles("generic")
@@ -320,3 +324,4 @@
def start_writer(self):
cdef np.int64_t in_type = np.int64(WRITER_TYPE)
client(in_type)
+
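The hook registered above hands every halo to Python as a flat particle array before Rockstar moves on. A hedged sketch of a user-supplied callback under this interface (the function name is illustrative; the record fields follow the particleflat struct):

    def center_of_mass(pf, parray):
        # one record per particle: id, pos_x..pos_z, vel_x..vel_z
        x, y, z = (parray["pos_%s" % ax].mean() for ax in "xyz")
        print "halo of %d particles at %f %f %f" % (parray.shape[0], x, y, z)

    rh.run(callbacks=[center_of_mass])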
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -230,6 +230,7 @@
gen_obj = self
else:
gen_obj = self._current_chunk.objs[0]
+ gen_obj.field_parameters = self.field_parameters
try:
finfo.check_available(gen_obj)
except NeedsGridType as ngt_exception:
@@ -397,6 +398,8 @@
ftype = self._current_particle_type
else:
ftype = self._current_fluid_type
+ if (ftype, fname) not in self.pf.field_info:
+ ftype = "gas"
if finfo.particle_type and ftype not in self.pf.particle_types:
raise YTFieldTypeNotFound(ftype)
elif not finfo.particle_type and ftype not in self.pf.fluid_types:
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -221,33 +221,54 @@
j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
-def _StarAngularMomentumVector(data):
+def _StarAngularMomentumVector(data, ftype=None):
"""
This function returns the mass-weighted average angular momentum vector
for stars.
"""
- is_star = data["creation_time"] > 0
- star_mass = data["ParticleMassMsun"][is_star]
- sLx = data["ParticleSpecificAngularMomentumX"][is_star]
- sLy = data["ParticleSpecificAngularMomentumY"][is_star]
- sLz = data["ParticleSpecificAngularMomentumZ"][is_star]
- amx = sLx * star_mass
- amy = sLy * star_mass
- amz = sLz * star_mass
+    if ftype is None:
+        is_star = data["creation_time"] > 0
+        star_mass = data["ParticleMassMsun"][is_star]
+    else:
+        is_star = Ellipsis
+        star_mass = data[(ftype, "ParticleMassMsun")]
+    j_mag = np.zeros(3, dtype='f8')
+    for i, ax in enumerate("XYZ"):
+        if ftype is None:
+            key = "ParticleSpecificAngularMomentum%s" % ax
+        else:
+            key = (ftype, "ParticleSpecificAngularMomentum%s" % ax)
+        j_mag[i] = (data[key][is_star] * star_mass).sum(dtype=np.float64)
-    j_mag = [amx.sum(dtype=np.float64), amy.sum(dtype=np.float64), amz.sum(dtype=np.float64)]
return [j_mag]
+def _ParticleAngularMomentumVector(data):
+ """
+ This function returns the mass-weighted average angular momentum vector
+ for all particles.
+ """
+ mass = data["ParticleMass"]
+ sLx = data["ParticleSpecificAngularMomentumX"]
+ sLy = data["ParticleSpecificAngularMomentumY"]
+ sLz = data["ParticleSpecificAngularMomentumZ"]
+ amx = sLx * mass
+ amy = sLy * mass
+ amz = sLz * mass
+ j_mag = [amx.sum(), amy.sum(), amz.sum()]
+ return [j_mag]
+
def _combAngularMomentumVector(data, j_mag):
if len(j_mag.shape) < 2: j_mag = np.expand_dims(j_mag, 0)
L_vec = j_mag.sum(axis=0,dtype=np.float64)
L_vec_norm = L_vec / np.sqrt((L_vec**2.0).sum(dtype=np.float64))
return L_vec_norm
+
add_quantity("AngularMomentumVector", function=_AngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
add_quantity("StarAngularMomentumVector", function=_StarAngularMomentumVector,
combine_function=_combAngularMomentumVector, n_ret=1)
+add_quantity("ParticleAngularMomentumVector", function=_ParticleAngularMomentumVector,
+ combine_function=_combAngularMomentumVector, n_ret=1)
+
def _BaryonSpinParameter(data):
"""
This function returns the spin parameter for the baryons, but it uses
@@ -716,3 +737,28 @@
add_quantity("ParticleDensityCenter",function=_ParticleDensityCenter,
combine_function=_combParticleDensityCenter,n_ret=2)
+
+def _HalfMass(data, field):
+ """
+    Cumulatively sum the given mass field in order of increasing
+    radius and return the radius enclosing half the total mass.
+    Simple, but memory-expensive.
+ """
+ d = np.nan_to_num(data[field])
+ r = data['Radius']
+ return d, r
+
+def _combHalfMass(data, field_vals, radii, frac=0.5):
+ fv = np.concatenate(field_vals.tolist()).ravel()
+ r = np.concatenate(radii.tolist()).ravel()
+ idx = np.argsort(r)
+ r = r[idx]
+ fv = np.cumsum(fv[idx])
+ idx, = np.where(fv / fv[-1] > frac)
+ if len(idx) > 0:
+ return r[idx[0]]
+ else:
+ return np.nan
+
+add_quantity("HalfMass",function=_HalfMass,
+ combine_function=_combHalfMass,n_ret=2)
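_HalfMass only gathers per-chunk (mass, radius) pairs; _combHalfMass concatenates them, sorts by radius, cumulatively sums the mass, and returns the first radius at which the enclosed fraction exceeds frac. A usage sketch, with an illustrative sphere and field:

    sp = pf.h.sphere("max", (1.0, "mpc"))
    r_half = sp.quantities["HalfMass"]("CellMassMsun")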
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -263,6 +263,8 @@
if hasattr(self.pf, "field_info"):
if not isinstance(item, tuple):
field = ("unknown", item)
+ finfo = self.pf._get_field_info(*field)
+ mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
finfo = self.pf._get_field_info(*field)
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ b/yt/data_objects/particle_fields.py
@@ -114,6 +114,14 @@
# Now some translation functions.
+ def particle_ones(field, data):
+ return np.ones(data[ptype, mass_name].shape, dtype="float64")
+
+ registry.add_field((ptype, "particle_ones"),
+ function = particle_ones,
+ particle_type = True,
+ units = "")
+
registry.add_field((ptype, "ParticleMass"),
function = TranslationFunc((ptype, mass_name)),
particle_type = True,
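Because particle_ones is a unit weight per particle, summing it counts particles of a given type; this is what the reworked Rockstar _setup_parameters does in place of the old particle_count derived field. For example (ptype is illustrative):

    ptype = "all"
    dd = pf.h.all_data()
    n_particles = int(dd.quantities["TotalQuantity"]((ptype, "particle_ones"))[0])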
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/data_objects/universal_fields.py
--- a/yt/data_objects/universal_fields.py
+++ b/yt/data_objects/universal_fields.py
@@ -675,6 +675,13 @@
yv = data["particle_velocity_y"] - bv[1]
zv = data["particle_velocity_z"] - bv[2]
return yv*z - zv*y
+add_field("ParticleSpecificAngularMomentumX",
+ function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumX_KMSMPC", function=_ParticleSpecificAngularMomentumX, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumY(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -685,6 +692,13 @@
xv = data["particle_velocity_x"] - bv[0]
zv = data["particle_velocity_z"] - bv[2]
return -(xv*z - zv*x)
+add_field("ParticleSpecificAngularMomentumY",
+ function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumY_KMSMPC", function=_ParticleSpecificAngularMomentumY, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleSpecificAngularMomentumZ(field, data):
if data.has_field_parameter("bulk_velocity"):
bv = data.get_field_parameter("bulk_velocity")
@@ -695,14 +709,13 @@
xv = data["particle_velocity_x"] - bv[0]
yv = data["particle_velocity_y"] - bv[1]
return xv*y - yv*x
-for ax in 'XYZ':
- n = "ParticleSpecificAngularMomentum%s" % ax
- add_field(n, function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentum,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
- add_field(n + "KMSMPC", function=eval("_%s" % n), particle_type=True,
- convert_function=_convertSpecificAngularMomentumKMSMPC,
- units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ",
+ function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentum,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
+add_field("ParticleSpecificAngularMomentumZ_KMSMPC", function=_ParticleSpecificAngularMomentumZ, particle_type=True,
+ convert_function=_convertSpecificAngularMomentumKMSMPC,
+ units=r"\rm{cm}^2/\rm{s}", validators=[ValidateParameter("center")])
def _ParticleAngularMomentum(field, data):
return data["ParticleMass"] * data["ParticleSpecificAngularMomentum"]
@@ -717,17 +730,17 @@
# particle_type=True, validators=[ValidateParameter('center')])
def _ParticleAngularMomentumX(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumX"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumX"]
add_field("ParticleAngularMomentumX", function=_ParticleAngularMomentumX,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumY(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumY"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumY"]
add_field("ParticleAngularMomentumY", function=_ParticleAngularMomentumY,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
def _ParticleAngularMomentumZ(field, data):
- return data["CellMass"] * data["ParticleSpecificAngularMomentumZ"]
+ return data["particle_mass"] * data["ParticleSpecificAngularMomentumZ"]
add_field("ParticleAngularMomentumZ", function=_ParticleAngularMomentumZ,
units=r"\rm{g}\/\rm{cm}^2/\rm{s}", particle_type=True,
validators=[ValidateParameter('center')])
@@ -858,6 +871,58 @@
add_field("RadialVelocityKMSABS", function=_RadialVelocityABS,
convert_function=_ConvertRadialVelocityKMS, units=r"\rm{km}/\rm{s}")
+def _ParticleRadiusSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ sphr = get_sph_r_component(pos, theta, phi, normal)
+ return sphr
+
+add_field("ParticleRadiusSpherical", function=_ParticleRadiusSpherical,
+          particle_type=True, units=r"\rm{cm}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticleThetaSpherical(field, data):
+ normal = data.get_field_parameter('normal')
+ center = data.get_field_parameter('center')
+ bv = data.get_field_parameter("bulk_velocity")
+ pos = "particle_position_%s"
+ pos = np.array([data[pos % ax] for ax in "xyz"])
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
+ pos = pos - np.reshape(center, (3, 1))
+ spht = get_sph_theta_component(pos, theta, phi, normal)
+ return spht
+
+add_field("ParticleThetaSpherical", function=_ParticleThetaSpherical,
+          particle_type=True, units=r"\rm{cm}",
+ validators=[ValidateParameter("normal"),
+ ValidateParameter("center")])
+
+def _ParticlePhiSpherical(field, data):
+    normal = data.get_field_parameter('normal')
+    center = data.get_field_parameter('center')
+    pos = "particle_position_%s"
+    pos = np.array([data[pos % ax] for ax in "xyz"])
+    phi = get_sph_phi(pos, center)
+    pos = pos - np.reshape(center, (3, 1))
+    sphp = get_sph_phi_component(pos, phi, normal)
+    return sphp
+
+add_field("ParticlePhiSpherical", function=_ParticlePhiSpherical,
+          particle_type=True, units=r"\rm{cm}",
+          validators=[ValidateParameter("normal"),
+                      ValidateParameter("center")])
+
def _ParticleRadialVelocity(field, data):
normal = data.get_field_parameter('normal')
center = data.get_field_parameter('center')
@@ -866,8 +931,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphr = get_sph_r_component(vel, theta, phi, normal)
@@ -886,8 +951,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
spht = get_sph_theta_component(vel, theta, phi, normal)
@@ -906,8 +971,8 @@
pos = np.array([data[pos % ax] for ax in "xyz"])
vel = "particle_velocity_%s"
vel = np.array([data[vel % ax] for ax in "xyz"])
- theta = get_sph_theta(pos.copy(), center)
- phi = get_sph_phi(pos.copy(), center)
+ theta = get_sph_theta(pos, center)
+ phi = get_sph_phi(pos, center)
pos = pos - np.reshape(center, (3, 1))
vel = vel - np.reshape(bv, (3, 1))
sphp = get_sph_phi_component(vel, phi, normal)
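All of these spherical-coordinate particle fields pull 'center', 'normal', and (for the velocity variants) 'bulk_velocity' from field parameters, so they are most naturally read through a data object that carries them. A sketch with illustrative values:

    sp = pf.h.sphere([0.5, 0.5, 0.5], (100.0, "kpc"))
    sp.set_field_parameter("normal", np.array([0.0, 0.0, 1.0]))
    sp.set_field_parameter("bulk_velocity", np.zeros(3))
    vrad = sp["ParticleRadialVelocity"]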
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/frontends/art/fields.py
--- a/yt/frontends/art/fields.py
+++ b/yt/frontends/art/fields.py
@@ -283,6 +283,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def baryon_mass(field, data):
+ rho = data["deposit", "baryon_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "baryon_mass"),
+ function = baryon_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Baryon Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def total_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "specie0_density"]
@@ -296,6 +308,18 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def total_mass(field, data):
+ rho = data["deposit", "total_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "total_mass"),
+ function = total_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Total Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
def multimass_density(field, data):
rho = data["deposit", "baryon_density"]
rho += data["deposit", "darkmatter_density"]
@@ -309,3 +333,15 @@
projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
projection_conversion = 'cm')
+def multimass_mass(field, data):
+ rho = data["deposit", "multimass_density"]
+ return rho * data['CellVolume']
+
+ARTFieldInfo.add_field(("deposit", "multimass_mass"),
+ function = multimass_mass,
+ validators = [ValidateSpatial()],
+ display_name = "\\mathrm{Multimass Mass}",
+ units = r"\mathrm{g}/\mathrm{cm}^{3}",
+ projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
+ projection_conversion = 'cm')
+
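Each of the three new ART fields follows the same pattern: a deposited density times CellVolume yields a deposited mass, so, for instance (a sketch):

    dd = pf.h.all_data()
    m_total = dd["deposit", "total_mass"].sum(dtype="float64")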
diff -r e62a4472597af9572321605961e446aef5c32dba -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f yt/frontends/sph/fields.py
--- a/yt/frontends/sph/fields.py
+++ b/yt/frontends/sph/fields.py
@@ -120,7 +120,7 @@
for fname in ["Coordinates", "Velocities", "ParticleIDs",
# Note: Mass, not Masses
- "Mass"]:
+ "Mass", "particle_index"]:
func = _field_concat(fname)
field_registry.add_field(("all", fname), function=func,
particle_type = True)
@@ -139,6 +139,10 @@
particle_scalar_functions(ptype, "Coordinates", "Velocities", field_registry)
known_registry.add_field((ptype, "Coordinates"), function=NullFunc,
particle_type = True)
+ # Now we add some translations.
+ GadgetFieldInfo.add_field( (ptype, "particle_index"),
+ function = TranslationFunc((ptype, "ParticleIDs")),
+ particle_type = True)
particle_deposition_functions("all", "Coordinates", "Mass", field_registry)
# Now we have to manually apply the splits for "all", since we don't want to
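TranslationFunc makes (ptype, "particle_index") a pure alias for (ptype, "ParticleIDs"), so code that asks for particle_index, like the Rockstar reader above, now works unchanged on Gadget data; e.g.:

    dd = pf.h.all_data()
    ids = dd["all", "particle_index"]  # reads through to ParticleIDs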
https://bitbucket.org/yt_analysis/yt-3.0/commits/8c01d1eca6a8/
Changeset: 8c01d1eca6a8
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 03:14:24
Summary: Skip particle fields for any particle types that lack coordinates.
Affected #: 1 file
diff -r 71840a50b4468dff4359da3bb2ecaec6aa2f483f -r 8c01d1eca6a86ef07fd581405beb5d73ebaf386a yt/frontends/sph/io.py
--- a/yt/frontends/sph/io.py
+++ b/yt/frontends/sph/io.py
@@ -99,6 +99,7 @@
ind = 0
for key in f.keys():
if not key.startswith("PartType"): continue
+ if "Coordinates" not in f[key]: continue
ds = f[key]["Coordinates"]
dt = ds.dtype.newbyteorder("N") # Native
pos = np.empty(ds.shape, dtype=dt)
@@ -125,6 +126,7 @@
for key in f.keys():
if not key.startswith("PartType"): continue
g = f[key]
+ if "Coordinates" not in g: continue
#ptype = int(key[8:])
ptype = str(key)
for k in g.keys():
https://bitbucket.org/yt_analysis/yt-3.0/commits/a72cdf38e4f2/
Changeset: a72cdf38e4f2
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 03:53:06
Summary: We should multiply by hubble_constant, not divide.
Affected #: 1 file
diff -r 8c01d1eca6a86ef07fd581405beb5d73ebaf386a -r a72cdf38e4f22d947df212ee2dc32dc4bcf18635 yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -231,7 +231,9 @@
if particle_mass is None:
particle_mass = dd.quantities["Extrema"](
(ptype, "ParticleMassMsun"), non_zero = True)[0][0]
- particle_mass /= tpf.hubble_constant
+    # NOTE: We want to turn Msun into Msun/h.  The stored value should be
+    # such that dividing by little h recovers the original mass.
+ particle_mass *= tpf.hubble_constant
p = {}
if self.total_particles is None:
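A quick check of the convention: with h = 0.7 and a particle of 1.0e10 Msun, the stored value becomes 1.0e10 * 0.7 = 7.0e9 Msun/h, and dividing by little h recovers 1.0e10 Msun. The old division stored ~1.43e10 instead, which is off by a factor of h**2 once a consumer divides by h.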
https://bitbucket.org/yt_analysis/yt-3.0/commits/0c2113039560/
Changeset: 0c2113039560
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 16:27:57
Summary: Adding a first set of tests for ParticleRegions.
Affected #: 1 file
diff -r a72cdf38e4f22d947df212ee2dc32dc4bcf18635 -r 0c2113039560dfa94462c7c13d6299ac89a292ae yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -1,6 +1,8 @@
from yt.testing import *
import numpy as np
-from yt.geometry.particle_oct_container import ParticleOctreeContainer
+from yt.geometry.particle_oct_container import \
+ ParticleOctreeContainer, \
+ ParticleRegions
from yt.geometry.oct_container import _ORDER_MAX
from yt.utilities.lib.geometry_utils import get_morton_indices
from yt.frontends.stream.api import load_particles
@@ -88,6 +90,29 @@
cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
yield assert_equal, cv1, cv2
+def test_particle_regions():
+ np.random.seed(int(0x4d3d3d3))
+ # We are going to test having 31, 127, 128 and 257 data files.
+ for nfiles in [31, 127, 128, 257]:
+ # Now we create particles
+ N = min(nfiles, 256)
+        reg = ParticleRegions([0.0, 0.0, 0.0],
+ [nfiles, nfiles, nfiles],
+ [N, N, N], nfiles)
+ Y, Z = np.mgrid[0.1 : nfiles - 0.1 : nfiles * 1j,
+ 0.1 : nfiles - 0.1 : nfiles * 1j]
+ X = 0.5 * np.ones(Y.shape, dtype="float64")
+ pos = np.array([X.ravel(),Y.ravel(),Z.ravel()],
+ dtype="float64").transpose()
+ for i in range(nfiles):
+ reg.add_data_file(pos, i)
+ pos[:,0] += 1.0
+ for mask in reg.masks:
+ maxs = np.unique(mask.max(axis=-1).max(axis=-1))
+ mins = np.unique(mask.min(axis=-1).min(axis=-1))
+ yield assert_equal, maxs, mins
+ yield assert_equal, maxs, np.unique(mask)
+
if __name__=="__main__":
for i in test_add_particles_random():
i[0](*i[1:])
https://bitbucket.org/yt_analysis/yt-3.0/commits/4e21009fdf59/
Changeset: 4e21009fdf59
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 16:37:30
Summary: A few failing tests.
Affected #: 1 file
diff -r 0c2113039560dfa94462c7c13d6299ac89a292ae -r 4e21009fdf5960cf7bb6d3aef6a76c8c1a3da5bc yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -6,6 +6,7 @@
from yt.geometry.oct_container import _ORDER_MAX
from yt.utilities.lib.geometry_utils import get_morton_indices
from yt.frontends.stream.api import load_particles
+from yt.geometry.selection_routines import RegionSelector
import yt.data_objects.api
import time, os
@@ -90,6 +91,22 @@
cv2 = dd2["CellVolumeCode"].sum(dtype="float64")
yield assert_equal, cv1, cv2
+class FakePF:
+ domain_left_edge = None
+ domain_right_edge = None
+ periodicity = (False, False, False)
+
+class FakeRegion:
+ def __init__(self, nfiles):
+ self.pf = FakePF()
+ self.pf.domain_left_edge = [0.0, 0.0, 0.0]
+ self.pf.domain_right_edge = [nfiles, nfiles, nfiles]
+ self.nfiles = nfiles
+
+ def set_edges(self, file_id):
+ self.left_edge = [file_id + 0.1, 0.0, 0.0]
+ self.right_edge = [file_id+1 - 0.1, self.nfiles, self.nfiles]
+
def test_particle_regions():
np.random.seed(int(0x4d3d3d3))
# We are going to test having 31, 127, 128 and 257 data files.
@@ -107,6 +124,16 @@
for i in range(nfiles):
reg.add_data_file(pos, i)
pos[:,0] += 1.0
+ pos[:,0] = 0.5
+ fr = FakeRegion(nfiles)
+ for i in range(nfiles):
+ fr.set_edges(i)
+ selector = RegionSelector(fr)
+ df = reg.identify_data_files(selector)
+ yield assert_equal, len(df), 1
+ yield assert_equal, df[0], i
+ pos[:,0] += 1.0
+
for mask in reg.masks:
maxs = np.unique(mask.max(axis=-1).max(axis=-1))
mins = np.unique(mask.min(axis=-1).min(axis=-1))
https://bitbucket.org/yt_analysis/yt-3.0/commits/be65139d36fb/
Changeset: be65139d36fb
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 22:52:15
Summary: Adding a few more tests and fixing casting error.
Affected #: 2 files
diff -r 4e21009fdf5960cf7bb6d3aef6a76c8c1a3da5bc -r be65139d36fbe537764fa1ea0af35726a0551225 yt/geometry/particle_oct_container.pyx
--- a/yt/geometry/particle_oct_container.pyx
+++ b/yt/geometry/particle_oct_container.pyx
@@ -255,12 +255,14 @@
np.float32_t
np.float64_t
+cdef np.uint64_t ONEBIT=1
+
cdef class ParticleRegions:
cdef np.float64_t left_edge[3]
cdef np.float64_t dds[3]
cdef np.float64_t idds[3]
cdef np.int32_t dims[3]
- cdef public int nfiles
+ cdef public np.uint64_t nfiles
cdef public object masks
def __init__(self, left_edge, right_edge, dims, nfiles):
@@ -283,13 +285,13 @@
self._mask_positions[np.float64_t](pos, file_id)
cdef void _mask_positions(self, np.ndarray[anyfloat, ndim=2] pos,
- int file_id):
+ np.uint64_t file_id):
cdef np.int64_t no = pos.shape[0]
cdef np.int64_t p
cdef int ind[3], i
cdef np.ndarray[np.uint64_t, ndim=3] mask
mask = self.masks[file_id/64]
- cdef np.int64_t val = 1 << (file_id - (file_id/64)*64)
+ cdef np.uint64_t val = ONEBIT << (file_id - (file_id/64)*64)
for p in range(no):
# Now we locate the particle
for i in range(3):
@@ -299,7 +301,7 @@
def identify_data_files(self, SelectorObject selector):
# This is relatively cheap to iterate over.
cdef int i, j, k, n
- cdef np.uint64_t fmask, offset
+ cdef np.uint64_t fmask, offset, fcheck
cdef np.float64_t LE[3], RE[3]
cdef np.ndarray[np.uint64_t, ndim=3] mask
files = []
@@ -324,8 +326,8 @@
LE[0] += self.dds[0]
RE[0] += self.dds[0]
# Now we iterate through...
- for i in range(64):
- if ((fmask >> i) & 1) == 1:
- files.append(i + n * 64)
+ for fcheck in range(64):
+ if ((fmask >> fcheck) & ONEBIT) == ONEBIT:
+ files.append(fcheck + n * 64)
return files
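ParticleRegions packs file membership into 64-bit words: one mask array per block of 64 files, with file file_id setting bit file_id % 64 of word file_id // 64 in every cell its particles touch; identify_data_files then tests each of the 64 bits per word. A pure-Python sketch of the same bookkeeping, with illustrative sizes:

    import numpy as np

    N, nfiles = 4, 100
    # one uint64 word per cell, one mask array per block of 64 files
    masks = [np.zeros((N, N, N), dtype="uint64")
             for _ in range((nfiles + 63) // 64)]

    def mark(i, j, k, file_id):
        bit = np.uint64(1) << np.uint64(file_id % 64)
        masks[file_id // 64][i, j, k] |= bit

    def files_touching(i, j, k):
        words = [m[i, j, k] for m in masks]
        return [n * 64 + b for n, w in enumerate(words)
                for b in range(64) if (w >> np.uint64(b)) & np.uint64(1)]

    mark(1, 2, 3, 70)
    assert files_touching(1, 2, 3) == [70]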
diff -r 4e21009fdf5960cf7bb6d3aef6a76c8c1a3da5bc -r be65139d36fbe537764fa1ea0af35726a0551225 yt/geometry/tests/test_particle_octree.py
--- a/yt/geometry/tests/test_particle_octree.py
+++ b/yt/geometry/tests/test_particle_octree.py
@@ -110,9 +110,11 @@
def test_particle_regions():
np.random.seed(int(0x4d3d3d3))
# We are going to test having 31, 127, 128 and 257 data files.
- for nfiles in [31, 127, 128, 257]:
+ for nfiles in [2, 31, 127, 128, 129]:
# Now we create particles
- N = min(nfiles, 256)
+ # Note: we set N to nfiles here for testing purposes. Inside the code
+ # we set it to min(N, 256)
+ N = nfiles
reg = ParticleRegions([0.0, 0.0, 0.0],
[nfiles, nfiles, nfiles],
[N, N, N], nfiles)
https://bitbucket.org/yt_analysis/yt-3.0/commits/4610fa758c9d/
Changeset: 4610fa758c9d
Branch: yt-3.0
User: MatthewTurk
Date: 2013-09-28 23:19:50
Summary: Merging from mainline
Affected #: 3 files
diff -r be65139d36fbe537764fa1ea0af35726a0551225 -r 4610fa758c9d1dc584eb1ce9f182ec0735773ffe yt/frontends/sph/api.py
--- a/yt/frontends/sph/api.py
+++ b/yt/frontends/sph/api.py
@@ -17,6 +17,7 @@
from .data_structures import \
OWLSStaticOutput, \
GadgetStaticOutput, \
+ GadgetHDF5StaticOutput, \
TipsyStaticOutput
from .io import \
diff -r be65139d36fbe537764fa1ea0af35726a0551225 -r 4610fa758c9d1dc584eb1ce9f182ec0735773ffe yt/mods.py
--- a/yt/mods.py
+++ b/yt/mods.py
@@ -118,7 +118,8 @@
from yt.frontends.sph.api import \
OWLSStaticOutput, OWLSFieldInfo, add_owls_field, \
- GadgetStaticOutput, GadgetFieldInfo, add_gadget_field, \
+ GadgetStaticOutput, GadgetHDF5StaticOutput, \
+ GadgetFieldInfo, add_gadget_field, \
TipsyStaticOutput, TipsyFieldInfo, add_tipsy_field
from yt.analysis_modules.list_modules import \
diff -r be65139d36fbe537764fa1ea0af35726a0551225 -r 4610fa758c9d1dc584eb1ce9f182ec0735773ffe yt/utilities/command_line.py
--- a/yt/utilities/command_line.py
+++ b/yt/utilities/command_line.py
@@ -1225,8 +1225,7 @@
lo.main( None, download=args.number )
class YTNotebookUploadCmd(YTCommand):
- args = (dict(short="file", type=str),
- dict(long="--title", short="-t", type=str, default = None))
+ args = (dict(short="file", type=str),)
description = \
"""
Upload an IPython notebook to hub.yt-project.org.
@@ -1245,11 +1244,8 @@
t = json.loads(open(filename).read())['metadata']['name']
except (ValueError, KeyError):
print "File does not appear to be an IPython notebook."
- if args.title is not None:
- t = args.title
if len(t) == 0:
- print "You need to specify a title with --title ."
- return 1
+ t = filename.strip(".ipynb")
from yt.utilities.minimal_representation import MinimalNotebook
mn = MinimalNotebook(filename, t)
rv = mn.upload()
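One caveat with the new fallback title: str.strip(".ipynb") removes any of those characters from both ends rather than stripping the suffix, so a name like "binp.ipynb" strips to an empty string. A suggested (not committed) alternative:

    if filename.endswith(".ipynb"):
        t = filename[:-len(".ipynb")]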
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.