[yt-svn] commit/yt-3.0: 5 new changesets
commits-noreply at bitbucket.org
commits-noreply at bitbucket.org
Tue Oct 22 04:11:08 PDT 2013
5 new commits in yt-3.0:
https://bitbucket.org/yt_analysis/yt-3.0/commits/5583f1046fce/
Changeset: 5583f1046fce
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-06 10:56:28
Summary: derived quantity API improvements.
Affected #: 2 files
diff -r c86499f6891e4b191a4ac7cbcd44959ed0b9e245 -r 5583f1046fceb372ba8d5471600283f7ee3597e9 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -746,6 +746,8 @@
self.coords = None
self._grids = None
self.quantities = DerivedQuantityCollection(self)
+ for f in self.quantities.keys():
+ self.__dict__[camelcase_to_underscore(f)] = self.quantities[f]
def cut_region(self, field_cuts):
"""
diff -r c86499f6891e4b191a4ac7cbcd44959ed0b9e245 -r 5583f1046fceb372ba8d5471600283f7ee3597e9 yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import __builtin__
-import time, types, signal, inspect, traceback, sys, pdb, os
+import time, types, signal, inspect, traceback, sys, pdb, os, re
import contextlib
import warnings, struct, subprocess
import numpy as np
@@ -625,3 +625,7 @@
return
if not os.path.exists(my_dir):
only_on_root(os.makedirs, my_dir)
+
+def camelcase_to_underscore(name):
+ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
https://bitbucket.org/yt_analysis/yt-3.0/commits/1613797d60ad/
Changeset: 1613797d60ad
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 07:44:13
Summary: Merging with yt-3.0 tip.
Affected #: 151 files
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb .hgignore
--- a/.hgignore
+++ b/.hgignore
@@ -15,6 +15,7 @@
yt/geometry/oct_visitors.c
yt/geometry/particle_deposit.c
yt/geometry/particle_oct_container.c
+yt/geometry/particle_smooth.c
yt/geometry/selection_routines.c
yt/utilities/amr_utils.c
yt/utilities/kdtree/forthonf2c.h
@@ -31,6 +32,7 @@
yt/utilities/lib/geometry_utils.c
yt/utilities/lib/Interpolators.c
yt/utilities/lib/kdtree.c
+yt/utilities/lib/mesh_utilities.c
yt/utilities/lib/misc_utilities.c
yt/utilities/lib/Octree.c
yt/utilities/lib/png_writer.c
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb doc/get_yt.sh
--- a/doc/get_yt.sh
+++ b/doc/get_yt.sh
@@ -263,8 +263,6 @@
YT_DEPS+=('python')
YT_DEPS+=('distribute')
YT_DEPS+=('libpng')
-YT_DEPS+=('freetype')
-YT_DEPS+=('hdf5')
YT_DEPS+=('numpy')
YT_DEPS+=('pygments')
YT_DEPS+=('jinja2')
@@ -275,6 +273,7 @@
YT_DEPS+=('h5py')
YT_DEPS+=('matplotlib')
YT_DEPS+=('cython')
+YT_DEPS+=('nose')
# Here is our dependency list for yt
log_cmd conda config --system --add channels http://repo.continuum.io/pkgs/free
@@ -301,7 +300,6 @@
export HDF5_DIR=${DEST_DIR}
log_cmd hg clone -r ${BRANCH} https://bitbucket.org/yt_analysis/yt ${YT_DIR}
pushd ${YT_DIR}
- echo $DEST_DIR > hdf5.cfg
log_cmd python setup.py develop
popd
log_cmd cp ${YT_DIR}/doc/activate ${DEST_DIR}/bin/activate
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -941,9 +941,7 @@
( ${HG_EXEC} pull 2>1 && ${HG_EXEC} up -C 2>1 ${BRANCH} 2>&1 ) 1>> ${LOG_FILE}
echo "Installing yt"
-echo $HDF5_DIR > hdf5.cfg
[ $INST_PNG -eq 1 ] && echo $PNG_DIR > png.cfg
-[ $INST_FTYPE -eq 1 ] && echo $FTYPE_DIR > freetype.cfg
( export PATH=$DEST_DIR/bin:$PATH ; ${DEST_DIR}/bin/python2.7 setup.py develop 2>&1 ) 1>> ${LOG_FILE} || do_exit
touch done
cd $MY_PWD
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -103,5 +103,8 @@
TwoPointFunctions, \
FcnSet
+from .sunyaev_zeldovich.api import SZProjection
+
from .radmc3d_export.api import \
RadMC3DWriter
+
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/halo_finding/rockstar/rockstar.py
--- a/yt/analysis_modules/halo_finding/rockstar/rockstar.py
+++ b/yt/analysis_modules/halo_finding/rockstar/rockstar.py
@@ -233,7 +233,8 @@
pmass_min, pmass_max = dd.quantities["Extrema"](
(ptype, "ParticleMassMsun"), non_zero = True)[0]
if pmass_min != pmass_max:
- raise YTRockstarMultiMassNotSupported
+ raise YTRockstarMultiMassNotSupported(pmass_min, pmass_max,
+ ptype)
particle_mass = pmass_min
# NOTE: We want to take our Msun and turn it into Msun/h . Its value
# should be such that dividing by little h gives the original value.
@@ -244,6 +245,7 @@
# Get total_particles in parallel.
tp = dd.quantities['TotalQuantity']((ptype, "particle_ones"))[0]
p['total_particles'] = int(tp)
+ mylog.warning("Total Particle Count: %0.3e", int(tp))
p['left_edge'] = tpf.domain_left_edge
p['right_edge'] = tpf.domain_right_edge
p['center'] = (tpf.domain_right_edge + tpf.domain_left_edge)/2.0
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/setup.py
--- a/yt/analysis_modules/setup.py
+++ b/yt/analysis_modules/setup.py
@@ -21,4 +21,5 @@
config.add_subpackage("star_analysis")
config.add_subpackage("two_point_functions")
config.add_subpackage("radmc3d_export")
+ config.add_subpackage("sunyaev_zeldovich")
return config
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunrise_export/sunrise_exporter.py
--- a/yt/analysis_modules/sunrise_export/sunrise_exporter.py
+++ b/yt/analysis_modules/sunrise_export/sunrise_exporter.py
@@ -25,7 +25,7 @@
from yt.utilities.physical_constants import \
kpc_per_cm, \
sec_per_year
-from yt.data_objects.universal_fields import add_field
+from yt.fields.universal_fields import add_field
from yt.mods import *
def export_to_sunrise(pf, fn, star_particle_type, fc, fwidth, ncells_wide=None,
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/api.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/api.py
@@ -0,0 +1,12 @@
+"""
+API for sunyaev_zeldovich
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from projection import SZProjection
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/projection.py
@@ -0,0 +1,354 @@
+"""
+Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (at least
+version 1.1.1) to be downloaded and installed:
+
+http://www.chluba.de/SZpack/
+
+For details on the computations involved please refer to the following references:
+
+Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
+Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.utilities.physical_constants import sigma_thompson, clight, hcgs, kboltz, mh, Tcmb
+from yt.data_objects.image_array import ImageArray
+from yt.data_objects.field_info_container import add_field
+from yt.funcs import fix_axis, mylog, iterable, get_pbar
+from yt.utilities.definitions import inv_axis_names
+from yt.visualization.image_writer import write_fits, write_projection
+from yt.visualization.volume_rendering.camera import off_axis_projection
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ communication_system, parallel_root_only
+import numpy as np
+
+I0 = 2*(kboltz*Tcmb)**3/((hcgs*clight)**2)*1.0e17
+
+try:
+ import SZpack
+except:
+ raise ImportError("SZpack not installed. It can be obtained from from http://www.chluba.de/SZpack/.")
+
+vlist = "xyz"
+
+def _t_squared(field, data):
+ return data["Density"]*data["TempkeV"]*data["TempkeV"]
+add_field("TSquared", function=_t_squared)
+
+def _beta_perp_squared(field, data):
+ return data["Density"]*data["VelocityMagnitude"]**2/clight/clight - data["BetaParSquared"]
+add_field("BetaPerpSquared", function=_beta_perp_squared)
+
+def _beta_par_squared(field, data):
+ return data["BetaPar"]**2/data["Density"]
+add_field("BetaParSquared", function=_beta_par_squared)
+
+def _t_beta_par(field, data):
+ return data["TempkeV"]*data["BetaPar"]
+add_field("TBetaPar", function=_t_beta_par)
+
+def _t_sz(field, data):
+ return data["Density"]*data["TempkeV"]
+add_field("TeSZ", function=_t_sz)
+
+class SZProjection(object):
+ r""" Initialize a SZProjection object.
+
+ Parameters
+ ----------
+ pf : parameter_file
+ The parameter file.
+ freqs : array_like
+ The frequencies (in GHz) at which to compute the SZ spectral distortion.
+ mue : float, optional
+ Mean molecular weight for determining the electron number density.
+ high_order : boolean, optional
+ Should we calculate high-order moments of velocity and temperature?
+
+ Examples
+ --------
+ >>> freqs = [90., 180., 240.]
+ >>> szprj = SZProjection(pf, freqs, high_order=True)
+ """
+ def __init__(self, pf, freqs, mue=1.143, high_order=False):
+
+ self.pf = pf
+ self.num_freqs = len(freqs)
+ self.high_order = high_order
+ self.freqs = np.array(freqs)
+ self.mueinv = 1./mue
+ self.xinit = hcgs*self.freqs*1.0e9/(kboltz*Tcmb)
+ self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
+ self.data = {}
+
+ self.units = {}
+ self.units["TeSZ"] = r"$\mathrm{keV}$"
+ self.units["Tau"] = None
+
+ self.display_names = {}
+ self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
+ self.display_names["Tau"] = r"$\mathrm{\tau}$"
+
+ for f, field in zip(self.freqs, self.freq_fields):
+ self.units[field] = r"$\mathrm{MJy\ sr^{-1}}$"
+ self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % (int(f))
+
+ def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
+ r""" Make an on-axis projection of the SZ signal.
+
+ Parameters
+ ----------
+ axis : integer or string
+ The axis of the simulation domain along which to make the SZprojection.
+ center : array_like or string, optional
+ The center of the projection.
+ width : float or tuple
+ The width of the projection.
+ nx : integer, optional
+ The dimensions on a side of the projection image.
+ source : yt.data_objects.api.AMRData, optional
+ If specified, this will be the data source used for selecting regions to project.
+
+ Examples
+ --------
+ >>> szprj.on_axis("y", center="max", width=(1.0, "mpc"), source=my_sphere)
+ """
+ axis = fix_axis(axis)
+
+ def _beta_par(field, data):
+ axis = data.get_field_parameter("axis")
+ # Load these, even though we will only use one
+ for ax in 'xyz':
+ data['%s-velocity' % ax]
+ vpar = data["Density"]*data["%s-velocity" % (vlist[axis])]
+ return vpar/clight
+ add_field("BetaPar", function=_beta_par)
+ self.pf.h._derived_fields_add(["BetaPar"])
+
+ proj = self.pf.h.proj("Density", axis, data_source=source)
+ proj.data_source.set_field_parameter("axis", axis)
+ frb = proj.to_frb(width, nx)
+ dens = frb["Density"]
+ Te = frb["TeSZ"]/dens
+ bpar = frb["BetaPar"]/dens
+ omega1 = frb["TSquared"]/dens/(Te*Te) - 1.
+ bperp2 = np.zeros((nx,nx))
+ sigma1 = np.zeros((nx,nx))
+ kappa1 = np.zeros((nx,nx))
+ if self.high_order:
+ bperp2 = frb["BetaPerpSquared"]/dens
+ sigma1 = frb["TBetaPar"]/dens/Te - bpar
+ kappa1 = frb["BetaParSquared"]/dens - bpar*bpar
+ tau = sigma_thompson*dens*self.mueinv/mh
+
+ nx,ny = frb.buff_size
+ self.bounds = frb.bounds
+ self.dx = (frb.bounds[1]-frb.bounds[0])/nx
+ self.dy = (frb.bounds[3]-frb.bounds[2])/ny
+ self.nx = nx
+
+ self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+ def off_axis(self, L, center="c", width=(1, "unitary"), nx=800, source=None):
+ r""" Make an off-axis projection of the SZ signal.
+
+ Parameters
+ ----------
+ L : array_like
+ The normal vector of the projection.
+ center : array_like or string, optional
+ The center of the projection.
+ width : float or tuple
+ The width of the projection.
+ nx : integer, optional
+ The dimensions on a side of the projection image.
+ source : yt.data_objects.api.AMRData, optional
+ If specified, this will be the data source used for selecting regions to project.
+ Currently unsupported in yt 2.x.
+
+ Examples
+ --------
+ >>> L = np.array([0.5, 1.0, 0.75])
+ >>> szprj.off_axis(L, center="c", width=(2.0, "mpc"))
+ """
+ if iterable(width):
+ w = width[0]/self.pf.units[width[1]]
+ else:
+ w = width
+ if center == "c":
+ ctr = self.pf.domain_center
+ elif center == "max":
+ ctr = self.pf.h.find_max("Density")
+ else:
+ ctr = center
+
+ if source is not None:
+ mylog.error("Source argument is not currently supported for off-axis S-Z projections.")
+ raise NotImplementedError
+
+ def _beta_par(field, data):
+ vpar = data["Density"]*(data["x-velocity"]*L[0]+
+ data["y-velocity"]*L[1]+
+ data["z-velocity"]*L[2])
+ return vpar/clight
+ add_field("BetaPar", function=_beta_par)
+ self.pf.h._derived_fields_add(["BetaPar"])
+
+ dens = off_axis_projection(self.pf, ctr, L, w, nx, "Density")
+ Te = off_axis_projection(self.pf, ctr, L, w, nx, "TeSZ")/dens
+ bpar = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPar")/dens
+ omega1 = off_axis_projection(self.pf, ctr, L, w, nx, "TSquared")/dens
+ omega1 = omega1/(Te*Te) - 1.
+ if self.high_order:
+ bperp2 = off_axis_projection(self.pf, ctr, L, w, nx, "BetaPerpSquared")/dens
+ sigma1 = off_axis_projection(self.pf, ctr, L, w, nx, "TBetaPar")/dens
+ sigma1 = sigma1/Te - bpar
+ kappa1 = off_axis_projection(self.pf, ctr, L, w, nx, "BetaParSquared")/dens
+ kappa1 -= bpar
+ else:
+ bperp2 = np.zeros((nx,nx))
+ sigma1 = np.zeros((nx,nx))
+ kappa1 = np.zeros((nx,nx))
+ tau = sigma_thompson*dens*self.mueinv/mh
+
+ self.bounds = np.array([-0.5*w, 0.5*w, -0.5*w, 0.5*w])
+ self.dx = w/nx
+ self.dy = w/nx
+ self.nx = nx
+
+ self._compute_intensity(tau, Te, bpar, omega1, sigma1, kappa1, bperp2)
+
+ def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
+
+ # Bad hack, but we get NaNs if we don't do something like this
+ small_beta = np.abs(bpar) < 1.0e-20
+ bpar[small_beta] = 1.0e-20
+
+ comm = communication_system.communicators[-1]
+
+ nx, ny = self.nx,self.nx
+ signal = np.zeros((self.num_freqs,nx,ny))
+ xo = np.zeros((self.num_freqs))
+
+ k = int(0)
+
+ start_i = comm.rank*nx/comm.size
+ end_i = (comm.rank+1)*nx/comm.size
+
+ pbar = get_pbar("Computing SZ signal.", nx*nx)
+
+ for i in xrange(start_i, end_i):
+ for j in xrange(ny):
+ xo[:] = self.xinit[:]
+ SZpack.compute_combo_means(xo, tau[i,j], Te[i,j],
+ bpar[i,j], omega1[i,j],
+ sigma1[i,j], kappa1[i,j], bperp2[i,j])
+ signal[:,i,j] = xo[:]
+ pbar.update(k)
+ k += 1
+
+ signal = comm.mpi_allreduce(signal)
+
+ pbar.finish()
+
+ for i, field in enumerate(self.freq_fields):
+ self.data[field] = ImageArray(I0*self.xinit[i]**3*signal[i,:,:])
+ self.data["Tau"] = ImageArray(tau)
+ self.data["TeSZ"] = ImageArray(Te)
+
+ @parallel_root_only
+ def write_fits(self, filename_prefix, clobber=True):
+ r""" Export images to a FITS file. Writes the SZ distortion in all
+ specified frequencies as well as the mass-weighted temperature and the
+ optical depth. Distance units are in kpc.
+
+ Parameters
+ ----------
+ filename_prefix : string
+ The prefix of the FITS filename.
+ clobber : boolean, optional
+ If the file already exists, do we overwrite?
+
+ Examples
+ --------
+ >>> szprj.write_fits("SZbullet", clobber=False)
+ """
+ coords = {}
+ coords["dx"] = self.dx*self.pf.units["kpc"]
+ coords["dy"] = self.dy*self.pf.units["kpc"]
+ coords["xctr"] = 0.0
+ coords["yctr"] = 0.0
+ coords["units"] = "kpc"
+ other_keys = {"Time" : self.pf.current_time}
+ write_fits(self.data, filename_prefix, clobber=clobber, coords=coords,
+ other_keys=other_keys)
+
+ @parallel_root_only
+ def write_png(self, filename_prefix):
+ r""" Export images to PNG files. Writes the SZ distortion in all
+ specified frequencies as well as the mass-weighted temperature and the
+ optical depth. Distance units are in kpc.
+
+ Parameters
+ ----------
+ filename_prefix : string
+ The prefix of the image filenames.
+
+ Examples
+ --------
+ >>> szprj.write_png("SZsloshing")
+ """
+ extent = tuple([bound*self.pf.units["kpc"] for bound in self.bounds])
+ for field, image in self.items():
+ filename=filename_prefix+"_"+field+".png"
+ label = self.display_names[field]
+ if self.units[field] is not None:
+ label += " ("+self.units[field]+")"
+ write_projection(image, filename, colorbar_label=label, take_log=False,
+ extent=extent, xlabel=r"$\mathrm{x\ (kpc)}$",
+ ylabel=r"$\mathrm{y\ (kpc)}$")
+
+ @parallel_root_only
+ def write_hdf5(self, filename):
+ r"""Export the set of S-Z fields to a set of HDF5 datasets.
+
+ Parameters
+ ----------
+ filename : string
+ This file will be opened in "write" mode.
+
+ Examples
+ --------
+ >>> szprj.write_hdf5("SZsloshing.h5")
+ """
+ import h5py
+ f = h5py.File(filename, "w")
+ for field, data in self.items():
+ f.create_dataset(field,data=data)
+ f.close()
+
+ def keys(self):
+ return self.data.keys()
+
+ def items(self):
+ return self.data.items()
+
+ def values(self):
+ return self.data.values()
+
+ def has_key(self, key):
+ return key in self.data.keys()
+
+ def __getitem__(self, key):
+ return self.data[key]
+
+ @property
+ def shape(self):
+ return (self.nx,self.nx)
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/setup.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/setup.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('sunyaev_zeldovich', parent_package, top_path)
+ config.add_subpackage("tests")
+ config.make_config_py() # installs __config__.py
+ #config.make_svn_version_py()
+ return config
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
--- /dev/null
+++ b/yt/analysis_modules/sunyaev_zeldovich/tests/test_projection.py
@@ -0,0 +1,139 @@
+"""
+Unit test the sunyaev_zeldovich analysis module.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.frontends.stream.api import load_uniform_grid
+from yt.funcs import get_pbar, mylog
+from yt.utilities.physical_constants import cm_per_kpc, K_per_keV, \
+ mh, cm_per_km, kboltz, Tcmb, hcgs, clight, sigma_thompson
+from yt.testing import *
+from yt.utilities.answer_testing.framework import requires_pf, \
+ GenericArrayTest, data_dir_load, GenericImageTest
+try:
+ from yt.analysis_modules.sunyaev_zeldovich.projection import SZProjection, I0
+except ImportError:
+ pass
+import numpy as np
+try:
+ import SZpack
+except ImportError:
+ pass
+
+mue = 1./0.88
+freqs = np.array([30., 90., 240.])
+
+def setup():
+ """Test specific setup."""
+ from yt.config import ytcfg
+ ytcfg["yt", "__withintesting"] = "True"
+
+def full_szpack3d(pf, xo):
+ data = pf.h.grids[0]
+ dz = pf.h.get_smallest_dx()*pf.units["cm"]
+ nx,ny,nz = data["Density"].shape
+ dn = np.zeros((nx,ny,nz))
+ Dtau = sigma_thompson*data["Density"]/(mh*mue)*dz
+ Te = data["Temperature"]/K_per_keV
+ betac = data["z-velocity"]/clight
+ pbar = get_pbar("Computing 3-D cell-by-cell S-Z signal for comparison.", nx)
+ for i in xrange(nx):
+ pbar.update(i)
+ for j in xrange(ny):
+ for k in xrange(nz):
+ dn[i,j,k] = SZpack.compute_3d(xo, Dtau[i,j,k],
+ Te[i,j,k], betac[i,j,k],
+ 1.0, 0.0, 0.0, 1.0e-5)
+ pbar.finish()
+ return I0*xo**3*np.sum(dn, axis=2)
+
+def setup_cluster():
+
+ R = 1000.
+ r_c = 100.
+ rho_c = 1.673e-26
+ beta = 1.
+ T0 = 4.
+ nx,ny,nz = 16,16,16
+ c = 0.17
+ a_c = 30.
+ a = 200.
+ v0 = 300.*cm_per_km
+ ddims = (nx,ny,nz)
+
+ x, y, z = np.mgrid[-R:R:nx*1j,
+ -R:R:ny*1j,
+ -R:R:nz*1j]
+
+ r = np.sqrt(x**2+y**2+z**2)
+
+ dens = np.zeros(ddims)
+ dens = rho_c*(1.+(r/r_c)**2)**(-1.5*beta)
+ temp = T0*K_per_keV/(1.+r/a)*(c+r/a_c)/(1.+r/a_c)
+ velz = v0*temp/(T0*K_per_keV)
+
+ data = {}
+ data["Density"] = dens
+ data["Temperature"] = temp
+ data["x-velocity"] = np.zeros(ddims)
+ data["y-velocity"] = np.zeros(ddims)
+ data["z-velocity"] = velz
+
+ bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])
+
+ L = 2*R*cm_per_kpc
+ dl = L/nz
+
+ pf = load_uniform_grid(data, ddims, L, bbox=bbox)
+
+ return pf
+
+@requires_module("SZpack")
+def test_projection():
+ pf = setup_cluster()
+ nx,ny,nz = pf.domain_dimensions
+ xinit = 1.0e9*hcgs*freqs/(kboltz*Tcmb)
+ szprj = SZProjection(pf, freqs, mue=mue, high_order=True)
+ szprj.on_axis(2, nx=nx)
+ deltaI = np.zeros((3,nx,ny))
+ for i in xrange(3):
+ deltaI[i,:,:] = full_szpack3d(pf, xinit[i])
+ yield assert_almost_equal, deltaI[i,:,:], szprj["%d_GHz" % int(freqs[i])], 6
+
+M7 = "DD0010/moving7_0010"
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_onaxis():
+ pf = data_dir_load(M7)
+ szprj = SZProjection(pf, freqs)
+ szprj.on_axis(2, nx=100)
+ def onaxis_array_func():
+ return szprj.data
+ def onaxis_image_func(filename_prefix):
+ szprj.write_png(filename_prefix)
+ for test in [GenericArrayTest(pf, onaxis_array_func),
+ GenericImageTest(pf, onaxis_image_func, 3)]:
+ test_M7_onaxis.__name__ = test.description
+ yield test
+
+@requires_module("SZpack")
+@requires_pf(M7)
+def test_M7_offaxis():
+ pf = data_dir_load(M7)
+ szprj = SZProjection(pf, freqs)
+ szprj.off_axis(np.array([0.1,-0.2,0.4]), nx=100)
+ def offaxis_array_func():
+ return szprj.data
+ def offaxis_image_func(filename_prefix):
+ szprj.write_png(filename_prefix)
+ for test in [GenericArrayTest(pf, offaxis_array_func),
+ GenericImageTest(pf, offaxis_image_func, 3)]:
+ test_M7_offaxis.__name__ = test.description
+ yield test
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/analysis_modules/two_point_functions/two_point_functions.py
--- a/yt/analysis_modules/two_point_functions/two_point_functions.py
+++ b/yt/analysis_modules/two_point_functions/two_point_functions.py
@@ -498,7 +498,7 @@
points[:, 2] = points[:, 2] / self.period[2]
fKD.qv_many = points.T
fKD.nn_tags = np.asfortranarray(np.empty((1, points.shape[0]), dtype='int64'))
- find_many_nn_nearest_neighbors()
+ fKD.find_many_nn_nearest_neighbors()
# The -1 is for fortran counting.
n = fKD.nn_tags[0,:] - 1
return n
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/config.py
--- a/yt/config.py
+++ b/yt/config.py
@@ -52,7 +52,7 @@
notebook_password = '',
answer_testing_tolerance = '3',
answer_testing_bitwise = 'False',
- gold_standard_filename = 'gold310',
+ gold_standard_filename = 'gold311',
local_standard_filename = 'local001',
sketchfab_api_key = 'None',
thread_field_detection = 'False'
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/construction_data_containers.py
--- a/yt/data_objects/construction_data_containers.py
+++ b/yt/data_objects/construction_data_containers.py
@@ -398,7 +398,8 @@
center, pf, field_parameters)
self.left_edge = np.array(left_edge)
self.level = level
- rdx = self.pf.domain_dimensions*self.pf.refine_by**level
+
+ rdx = self.pf.domain_dimensions*self.pf.relative_refinement(0, level)
rdx[np.where(dims - 2 * num_ghost_zones <= 1)] = 1 # issue 602
self.dds = self.pf.domain_width / rdx.astype("float64")
self.ActiveDimensions = np.array(dims, dtype='int32')
@@ -488,9 +489,11 @@
output_fields = [np.zeros(self.ActiveDimensions, dtype="float64")
for field in fields]
domain_dims = self.pf.domain_dimensions.astype("int64") \
- * self.pf.refine_by**self.level
+ * self.pf.relative_refinement(0, self.level)
for chunk in self._data_source.chunks(fields, "io"):
input_fields = [chunk[field] for field in fields]
+ # NOTE: This usage of "refine_by" is actually *okay*, because it's
+ # being used with respect to iref, which is *already* scaled!
fill_region(input_fields, output_fields, self.level,
self.global_startindex, chunk.icoords, chunk.ires,
domain_dims, self.pf.refine_by)
@@ -647,10 +650,12 @@
ls = self._initialize_level_state(fields)
for level in range(self.level + 1):
domain_dims = self.pf.domain_dimensions.astype("int64") \
- * self.pf.refine_by**level
+ * self.pf.relative_refinement(0, self.level)
for chunk in ls.data_source.chunks(fields, "io"):
chunk[fields[0]]
input_fields = [chunk[field] for field in fields]
+ # NOTE: This usage of "refine_by" is actually *okay*, because it's
+ # being used with respect to iref, which is *already* scaled!
fill_region(input_fields, ls.fields, ls.current_level,
ls.global_startindex, chunk.icoords,
chunk.ires, domain_dims, self.pf.refine_by)
@@ -682,14 +687,16 @@
def _update_level_state(self, level_state):
ls = level_state
if ls.current_level >= self.level: return
+ rf = float(self.pf.relative_refinement(
+ ls.current_level, ls.current_level + 1))
ls.current_level += 1
- ls.current_dx = self._base_dx / self.pf.refine_by**ls.current_level
+ ls.current_dx = self._base_dx / \
+ self.pf.relative_refinement(0, ls.current_level)
self._setup_data_source(ls)
LL = self.left_edge - self.pf.domain_left_edge
ls.old_global_startindex = ls.global_startindex
ls.global_startindex = np.rint(LL / ls.current_dx).astype('int64') - 1
ls.domain_iwidth = np.rint(self.pf.domain_width/ls.current_dx).astype('int64')
- rf = float(self.pf.refine_by)
input_left = (level_state.old_global_startindex + 0.5) * rf
width = (self.ActiveDimensions*self.dds)
output_dims = np.rint(width/level_state.current_dx+0.5).astype("int32") + 2
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -469,7 +469,6 @@
fd = self.pf.field_dependencies.get(field, None) or \
self.pf.field_dependencies.get(field[1], None)
if fd is None: continue
- fd = self.pf.field_dependencies[field]
requested = self._determine_fields(list(set(fd.requested)))
deps = [d for d in requested if d not in fields_to_get]
fields_to_get += deps
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/field_info_container.py
--- a/yt/data_objects/field_info_container.py
+++ b/yt/data_objects/field_info_container.py
@@ -264,7 +264,7 @@
if not isinstance(item, tuple):
field = ("unknown", item)
finfo = self.pf._get_field_info(*field)
- mylog.debug("Guessing field %s is %s", item, finfo.name)
+ #mylog.debug("Guessing field %s is %s", item, finfo.name)
else:
field = item
finfo = self.pf._get_field_info(*field)
@@ -301,7 +301,8 @@
return self[item]
elif finfo is not None and finfo.particle_type:
if item == "Coordinates" or item[1] == "Coordinates" or \
- item == "Velocities" or item[1] == "Velocities":
+ item == "Velocities" or item[1] == "Velocities" or \
+ item == "Velocity" or item[1] == "Velocity":
# A vector
self[item] = np.ones((self.NumberOfParticles, 3))
else:
@@ -329,6 +330,8 @@
self.requested_parameters.append(param)
if param in ['bulk_velocity', 'center', 'normal']:
return np.random.random(3) * 1e-2
+ elif param in ['axis']:
+ return 0
else:
return 0.0
_num_ghost_zones = 0
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -56,6 +56,7 @@
self.pf = self.hierarchy.parameter_file # weakref already
self._child_mask = self._child_indices = self._child_index_mask = None
self.start_index = None
+ self.filename = filename
self._last_mask = None
self._last_count = -1
self._last_selector_id = None
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/octree_subset.py
--- a/yt/data_objects/octree_subset.py
+++ b/yt/data_objects/octree_subset.py
@@ -113,7 +113,7 @@
_domain_ind = None
def select_blocks(self, selector):
- mask = self.oct_handler.mask(selector)
+ mask = self.oct_handler.mask(selector, domain_id = self.domain_id)
mask = self._reshape_vals(mask)
slicer = OctreeSubsetBlockSlice(self)
for i, sl in slicer:
@@ -271,12 +271,14 @@
@property
def LeftEdge(self):
- LE = self._fcoords[0,0,0,self.ind,:] - self._fwidth[0,0,0,self.ind,:]*0.5
+ LE = (self._fcoords[0,0,0,self.ind,:]
+ - self._fwidth[0,0,0,self.ind,:])*0.5
return LE
@property
def RightEdge(self):
- RE = self._fcoords[1,1,1,self.ind,:] + self._fwidth[1,1,1,self.ind,:]*0.5
+ RE = (self._fcoords[-1,-1,-1,self.ind,:]
+ + self._fwidth[-1,-1,-1,self.ind,:])*0.5
return RE
@property
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/particle_fields.py
--- a/yt/data_objects/particle_fields.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-These are common particle deposition fields.
-
-
-
-
-"""
-
-#-----------------------------------------------------------------------------
-# Copyright (c) 2013, yt Development Team.
-#
-# Distributed under the terms of the Modified BSD License.
-#
-# The full license is in the file COPYING.txt, distributed with this software.
-#-----------------------------------------------------------------------------
-
-import numpy as np
-
-from yt.funcs import *
-from yt.data_objects.field_info_container import \
- FieldInfoContainer, \
- FieldInfo, \
- ValidateParameter, \
- ValidateDataField, \
- ValidateProperty, \
- ValidateSpatial, \
- ValidateGridType, \
- NullFunc, \
- TranslationFunc
-from yt.utilities.physical_constants import \
- mass_hydrogen_cgs, \
- mass_sun_cgs, \
- mh
-
-def _field_concat(fname):
- def _AllFields(field, data):
- v = []
- for ptype in data.pf.particle_types:
- data.pf._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.pf.known_filters:
- continue
- v.append(data[ptype, fname].copy())
- rv = np.concatenate(v, axis=0)
- return rv
- return _AllFields
-
-def _field_concat_slice(fname, axi):
- def _AllFields(field, data):
- v = []
- for ptype in data.pf.particle_types:
- data.pf._last_freq = (ptype, None)
- if ptype == "all" or \
- ptype in data.pf.known_filters:
- continue
- v.append(data[ptype, fname][:,axi])
- rv = np.concatenate(v, axis=0)
- return rv
- return _AllFields
-
-def particle_deposition_functions(ptype, coord_name, mass_name, registry):
- orig = set(registry.keys())
- def particle_count(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, method = "count")
- return d
-
- registry.add_field(("deposit", "%s_count" % ptype),
- function = particle_count,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Count}" % ptype,
- projection_conversion = '1')
-
- def particle_mass(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
- return d
-
- registry.add_field(("deposit", "%s_mass" % ptype),
- function = particle_mass,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Mass}" % ptype,
- units = r"\mathrm{g}",
- projected_units = r"\mathrm{g}\/\mathrm{cm}",
- projection_conversion = 'cm')
-
- def particle_density(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "sum")
- d /= data["gas","CellVolume"]
- return d
-
- registry.add_field(("deposit", "%s_density" % ptype),
- function = particle_density,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s Density}" % ptype,
- units = r"\mathrm{g}/\mathrm{cm}^{3}",
- projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
- projection_conversion = 'cm')
-
- def particle_cic(field, data):
- pos = data[ptype, coord_name]
- d = data.deposit(pos, [data[ptype, mass_name]], method = "cic")
- d /= data["gas","CellVolume"]
- return d
-
- registry.add_field(("deposit", "%s_cic" % ptype),
- function = particle_cic,
- validators = [ValidateSpatial()],
- display_name = "\\mathrm{%s CIC Density}" % ptype,
- units = r"\mathrm{g}/\mathrm{cm}^{3}",
- projected_units = r"\mathrm{g}/\mathrm{cm}^{2}",
- projection_conversion = 'cm')
-
- # Now some translation functions.
-
- def particle_ones(field, data):
- return np.ones(data[ptype, mass_name].shape, dtype="float64")
-
- registry.add_field((ptype, "particle_ones"),
- function = particle_ones,
- particle_type = True,
- units = "")
-
- registry.add_field((ptype, "ParticleMass"),
- function = TranslationFunc((ptype, mass_name)),
- particle_type = True,
- units = r"\mathrm{g}")
-
- def _ParticleMassMsun(field, data):
- return data[ptype, mass_name].copy()
- def _conv_Msun(data):
- return 1.0/mass_sun_cgs
-
- registry.add_field((ptype, "ParticleMassMsun"),
- function = _ParticleMassMsun,
- convert_function = _conv_Msun,
- particle_type = True,
- units = r"\mathrm{M}_\odot")
-
- def particle_mesh_ids(field, data):
- pos = data[ptype, coord_name]
- ids = np.zeros(pos.shape[0], dtype="float64") - 1
- # This is float64 in name only. It will be properly cast inside the
- # deposit operation.
- #_ids = ids.view("float64")
- data.deposit(pos, [ids], method = "mesh_id")
- return ids
- registry.add_field((ptype, "mesh_id"),
- function = particle_mesh_ids,
- validators = [ValidateSpatial()],
- particle_type = True)
-
- return list(set(registry.keys()).difference(orig))
-
-
-def particle_scalar_functions(ptype, coord_name, vel_name, registry):
-
- # Now we have to set up the various velocity and coordinate things. In the
- # future, we'll actually invert this and use the 3-component items
- # elsewhere, and stop using these.
-
- # Note that we pass in _ptype here so that it's defined inside the closure.
- orig = set(registry.keys())
-
- def _get_coord_funcs(axi, _ptype):
- def _particle_velocity(field, data):
- return data[_ptype, vel_name][:,axi]
- def _particle_position(field, data):
- return data[_ptype, coord_name][:,axi]
- return _particle_velocity, _particle_position
- for axi, ax in enumerate("xyz"):
- v, p = _get_coord_funcs(axi, ptype)
- registry.add_field((ptype, "particle_velocity_%s" % ax),
- particle_type = True, function = v)
- registry.add_field((ptype, "particle_position_%s" % ax),
- particle_type = True, function = p)
-
- return list(set(registry.keys()).difference(orig))
-
-def particle_vector_functions(ptype, coord_names, vel_names, registry):
-
- # This will column_stack a set of scalars to create vector fields.
- orig = set(registry.keys())
-
- def _get_vec_func(_ptype, names):
- def particle_vectors(field, data):
- return np.column_stack([data[_ptype, name] for name in names])
- return particle_vectors
- registry.add_field((ptype, "Coordinates"),
- function=_get_vec_func(ptype, coord_names),
- particle_type=True)
- registry.add_field((ptype, "Velocities"),
- function=_get_vec_func(ptype, vel_names),
- particle_type=True)
-
- return list(set(registry.keys()).difference(orig))
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/particle_unions.py
--- /dev/null
+++ b/yt/data_objects/particle_unions.py
@@ -0,0 +1,27 @@
+"""
+These are particle union objects. These essentially alias one particle to
+another, where the other can be one or several particle types.
+
+
+
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.funcs import ensure_list
+
+class ParticleUnion(object):
+ def __init__(self, name, sub_types):
+ self.name = name
+ self.sub_types = ensure_list(sub_types)
+
+ def __iter__(self):
+ for st in self.sub_types:
+ yield st
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/static_output.py
--- a/yt/data_objects/static_output.py
+++ b/yt/data_objects/static_output.py
@@ -29,6 +29,8 @@
FieldInfoContainer, NullFunc
from yt.data_objects.particle_filters import \
filter_registry
+from yt.data_objects.particle_unions import \
+ ParticleUnion
from yt.utilities.minimal_representation import \
MinimalStaticOutput
@@ -48,13 +50,17 @@
default_fluid_type = "gas"
fluid_types = ("gas","deposit")
- particle_types = ("all",)
+ particle_types = ("io",) # By default we have an 'all'
+ particle_types_raw = ("io",)
geometry = "cartesian"
coordinates = None
max_level = 99
storage_filename = None
_particle_mass_name = None
_particle_coordinates_name = None
+ _particle_velocity_name = None
+ particle_unions = None
+ known_filters = None
class __metaclass__(type):
def __init__(cls, name, b, d):
@@ -91,7 +97,8 @@
self.file_style = file_style
self.conversion_factors = {}
self.parameters = {}
- self.known_filters = {}
+ self.known_filters = self.known_filters or {}
+ self.particle_unions = self.particle_unions or {}
# path stuff
self.parameter_filename = str(filename)
@@ -213,6 +220,11 @@
raise RuntimeError("You should not instantiate StaticOutput.")
self._instantiated_hierarchy = self._hierarchy_class(
self, data_style=self.data_style)
+ # Now we do things that we need an instantiated hierarchy for
+ if "all" not in self.particle_types:
+ mylog.debug("Creating Particle Union 'all'")
+ pu = ParticleUnion("all", list(self.particle_types_raw))
+ self.add_particle_union(pu)
return self._instantiated_hierarchy
h = hierarchy # alias
@@ -308,12 +320,41 @@
return self._last_finfo
# We also should check "all" for particles, which can show up if you're
# mixing deposition/gas fields with particle fields.
- if guessing_type and ("all", fname) in self.field_info:
- self._last_freq = ("all", fname)
- self._last_finfo = self.field_info["all", fname]
- return self._last_finfo
+ if guessing_type:
+ for ftype in ("all", self.default_fluid_type):
+ if (ftype, fname) in self.field_info:
+ self._last_freq = (ftype, fname)
+ self._last_finfo = self.field_info[(ftype, fname)]
+ return self._last_finfo
raise YTFieldNotFound((ftype, fname), self)
+ def add_particle_union(self, union):
+ # No string lookups here, we need an actual union.
+ f = self.particle_fields_by_type
+ fields = set_intersection([f[s] for s in union
+ if s in self.particle_types_raw])
+ self.particle_types += (union.name,)
+ self.particle_unions[union.name] = union
+ fields = [ (union.name, field) for field in fields]
+ self.h.field_list.extend(fields)
+ # Give ourselves a chance to add them here, first, then...
+ # ...if we can't find them, we set them up as defaults.
+ self.h._setup_particle_types([union.name])
+ self.h._setup_unknown_fields(fields, self.field_info,
+ skip_removal = True)
+
+ def _setup_particle_type(self, ptype):
+ mylog.debug("Don't know what to do with %s", ptype)
+ return []
+
+ @property
+ def particle_fields_by_type(self):
+ fields = defaultdict(list)
+ for field in self.h.field_list:
+ if field[0] in self.particle_types_raw:
+ fields[field[0]].append(field[1])
+ return fields
+
@property
def ires_factor(self):
o2 = np.log2(self.refine_by)
@@ -321,6 +362,9 @@
raise RuntimeError
return int(o2)
+ def relative_refinement(self, l0, l1):
+ return self.refine_by**(l1-l0)
+
def _reconstruct_pf(*args, **kwargs):
pfs = ParameterFileStore()
pf = pfs.get_pf_hash(*args)
diff -r 5583f1046fceb372ba8d5471600283f7ee3597e9 -r 1613797d60ad38a2a6675c2219c3c5df526a29fb yt/data_objects/tests/test_fields.py
--- a/yt/data_objects/tests/test_fields.py
+++ /dev/null
@@ -1,108 +0,0 @@
-from yt.testing import *
-import numpy as np
-from yt.data_objects.field_info_container import \
- FieldInfo
-import yt.data_objects.universal_fields
-from yt.utilities.definitions import \
- mpc_conversion, sec_conversion
-
-def setup():
- from yt.config import ytcfg
- ytcfg["yt","__withintesting"] = "True"
- np.seterr(all = 'ignore')
-
-_sample_parameters = dict(
- axis = 0,
- center = np.array((0.0, 0.0, 0.0)),
- bulk_velocity = np.array((0.0, 0.0, 0.0)),
- normal = np.array((0.0, 0.0, 1.0)),
- cp_x_vec = np.array((1.0, 0.0, 0.0)),
- cp_y_vec = np.array((0.0, 1.0, 0.0)),
- cp_z_vec = np.array((0.0, 0.0, 1.0)),
-)
-
-_base_fields = ["Density", "x-velocity", "y-velocity", "z-velocity"]
-
-def realistic_pf(fields, nprocs):
- np.random.seed(int(0x4d3d3d3))
- pf = fake_random_pf(16, fields = fields, nprocs = nprocs)
- pf.parameters["HydroMethod"] = "streaming"
- pf.parameters["Gamma"] = 5.0/3.0
- pf.parameters["EOSType"] = 1.0
- pf.parameters["EOSSoundSpeed"] = 1.0
- pf.conversion_factors["Time"] = 1.0
- pf.conversion_factors.update( dict((f, 1.0) for f in fields) )
- pf.current_redshift = 0.0001
- pf.hubble_constant = 0.7
- pf.omega_matter = 0.27
- for unit in mpc_conversion:
- pf.units[unit+'h'] = pf.units[unit]
- pf.units[unit+'cm'] = pf.units[unit]
- pf.units[unit+'hcm'] = pf.units[unit]
- return pf
-
-class TestFieldAccess(object):
- description = None
-
- def __init__(self, field_name, nproc):
- # Note this should be a field name
- self.field_name = field_name
- self.description = "Accessing_%s_%s" % (field_name, nproc)
- self.nproc = nproc
-
- def __call__(self):
- field = FieldInfo[self.field_name]
- deps = field.get_dependencies()
- fields = list(set(deps.requested + _base_fields))
- skip_grids = False
- needs_spatial = False
- for v in field.validators:
- f = getattr(v, "fields", None)
- if f: fields += f
- if getattr(v, "ghost_zones", 0) > 0:
- skip_grids = True
- if hasattr(v, "ghost_zones"):
- needs_spatial = True
- pf = realistic_pf(fields, self.nproc)
- # This gives unequal sized grids as well as subgrids
- dd1 = pf.h.all_data()
- dd2 = pf.h.all_data()
- dd1.field_parameters.update(_sample_parameters)
- dd2.field_parameters.update(_sample_parameters)
- v1 = dd1[self.field_name]
- conv = field._convert_function(dd1) or 1.0
- if not field.particle_type:
- assert_equal(v1, dd1["gas", self.field_name])
- if not needs_spatial:
- assert_array_almost_equal_nulp(v1, conv*field._function(field, dd2), 4)
- if not skip_grids:
- for g in pf.h.grids:
- g.field_parameters.update(_sample_parameters)
- conv = field._convert_function(g) or 1.0
- v1 = g[self.field_name]
- g.clear_data()
- g.field_parameters.update(_sample_parameters)
- assert_array_almost_equal_nulp(v1, conv*field._function(field, g), 4)
-
-def test_all_fields():
- for field in FieldInfo:
- if isinstance(field, types.TupleType):
- fname = field[0]
- else:
- fname = field
- if fname.startswith("CuttingPlane"): continue
- if fname.startswith("particle"): continue
- if fname.startswith("CIC"): continue
- if fname.startswith("WeakLensingConvergence"): continue
- if fname.startswith("DensityPerturbation"): continue
- if fname.startswith("Matter_Density"): continue
- if fname.startswith("Overdensity"): continue
- if FieldInfo[field].particle_type: continue
- for nproc in [1, 4, 8]:
- test_all_fields.__name__ = "%s_%s" % (field, nproc)
- yield TestFieldAccess(field, nproc)
-
-if __name__ == "__main__":
- setup()
- for t in test_all_fields():
- t()
This diff is so big that we needed to truncate the remainder.
https://bitbucket.org/yt_analysis/yt-3.0/commits/7f29b0e8bf2b/
Changeset: 7f29b0e8bf2b
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 08:08:10
Summary: Moving the derived quantity functions to the quantities object.
Affected #: 1 file
diff -r 1613797d60ad38a2a6675c2219c3c5df526a29fb -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -746,7 +746,8 @@
self._grids = None
self.quantities = DerivedQuantityCollection(self)
for f in self.quantities.keys():
- self.__dict__[camelcase_to_underscore(f)] = self.quantities[f]
+ self.quantities.__dict__[camelcase_to_underscore(f)] = \
+ self.quantities[f]
def cut_region(self, field_cuts):
"""
https://bitbucket.org/yt_analysis/yt-3.0/commits/aa075fc7dbbd/
Changeset: aa075fc7dbbd
Branch: yt-3.0
User: ngoldbaum
Date: 2013-10-21 10:18:11
Summary: Moving the derived quantities syntax magic into DerivedQuantityCollection.
Affected #: 2 files
diff -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 -r aa075fc7dbbd4ff74832af3d461429a547e19c17 yt/data_objects/data_containers.py
--- a/yt/data_objects/data_containers.py
+++ b/yt/data_objects/data_containers.py
@@ -745,9 +745,6 @@
self.coords = None
self._grids = None
self.quantities = DerivedQuantityCollection(self)
- for f in self.quantities.keys():
- self.quantities.__dict__[camelcase_to_underscore(f)] = \
- self.quantities[f]
def cut_region(self, field_cuts):
"""
diff -r 7f29b0e8bf2b794fd2111e603c437a54412ba513 -r aa075fc7dbbd4ff74832af3d461429a547e19c17 yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -88,8 +88,12 @@
class DerivedQuantityCollection(object):
functions = quantity_info
- def __init__(self, data_source):
- self.data_source = data_source
+ def __new__(cls, data_source, *args, **kwargs):
+ inst = object.__new__(cls)
+ inst.data_source = data_source
+ for f in inst.keys():
+ setattr(inst, camelcase_to_underscore(f), inst[f])
+ return inst
def __getitem__(self, key):
if key not in self.functions:
https://bitbucket.org/yt_analysis/yt-3.0/commits/ea58f0906739/
Changeset: ea58f0906739
Branch: yt-3.0
User: MatthewTurk
Date: 2013-10-22 13:10:58
Summary: Merged in ngoldbaum/yt-3.0 (pull request #112)
Derived quantity syntax sugar
Affected #: 3 files
diff -r 6354e5834da8f23e1e2e5dc799e332d1819d7e02 -r ea58f090673982ed065dfcca187fad1535cf84ac yt/data_objects/derived_quantities.py
--- a/yt/data_objects/derived_quantities.py
+++ b/yt/data_objects/derived_quantities.py
@@ -88,8 +88,12 @@
class DerivedQuantityCollection(object):
functions = quantity_info
- def __init__(self, data_source):
- self.data_source = data_source
+ def __new__(cls, data_source, *args, **kwargs):
+ inst = object.__new__(cls)
+ inst.data_source = data_source
+ for f in inst.keys():
+ setattr(inst, camelcase_to_underscore(f), inst[f])
+ return inst
def __getitem__(self, key):
if key not in self.functions:
diff -r 6354e5834da8f23e1e2e5dc799e332d1819d7e02 -r ea58f090673982ed065dfcca187fad1535cf84ac yt/funcs.py
--- a/yt/funcs.py
+++ b/yt/funcs.py
@@ -14,7 +14,7 @@
#-----------------------------------------------------------------------------
import __builtin__
-import time, types, signal, inspect, traceback, sys, pdb, os
+import time, types, signal, inspect, traceback, sys, pdb, os, re
import contextlib
import warnings, struct, subprocess
import numpy as np
@@ -626,6 +626,10 @@
if not os.path.exists(my_dir):
only_on_root(os.makedirs, my_dir)
+def camelcase_to_underscore(name):
+ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
+ return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
+
def set_intersection(some_list):
if len(some_list) == 0: return set([])
# This accepts a list of iterables, which we get the intersection of.
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
More information about the yt-svn
mailing list