[yt-svn] commit/yt-3.0: 3 new changesets
commits-noreply at bitbucket.org
Thu Nov 21 15:50:35 PST 2013
3 new commits in yt-3.0:
https://bitbucket.org/yt_analysis/yt-3.0/commits/e04142fa31be/
Changeset: e04142fa31be
Branch: yt-3.0
User: ChrisMalone
Date: 2013-11-21 20:53:23
Summary: this at least reads in the 2d datasets, but weighting isn't quite right
Affected #: 1 file
diff -r a65e2f8c645755837f9016363ef470ec71e37371 -r e04142fa31befe213b454ae87e998ca7ea46db23 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -52,11 +52,21 @@
# instead of e's.
_scinot_finder = re.compile(r"[-+]?[0-9]*\.?[0-9]+([eEdD][-+]?[0-9]+)?")
# This is the dimensions in the Cell_H file for each level
-_dim_finder = re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")
+# It is different for different dimensionalities, so we make a list
+_dim_finder = [ \
+ re.compile(r"\(\((\d+)\) \((\d+)\) \(\d+\)\)$"),
+ re.compile(r"\(\((\d+,\d+)\) \((\d+,\d+)\) \(\d+,\d+\)\)$"),
+ re.compile(r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \(\d+,\d+,\d+\)\)$")]
# This is the line that prefixes each set of data for a FAB in the FAB file
-_header_pattern = re.compile(r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), " +
- r"\(([0-9 ]+)\)\)\)\(\((\d+,\d+,\d+)\) " +
- "\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")
+# It is different for different dimensionalities, so we make a list
+_endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
+_header_pattern = [ \
+ re.compile(_endian_regex +
+ r"\(\((\d+)\) \((\d+)\) \((\d+)\)\) (\d+)\n"),
+ re.compile(_endian_regex +
+ r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n"),
+ re.compile(_endian_regex +
+ r"\(\((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\) \((\d+,\d+,\d+)\)\) (\d+)\n")]
@@ -141,6 +151,7 @@
GridGeometryHandler.__init__(self, pf, data_style)
self._cache_endianness(self.grids[-1])
+
#self._read_particles()
def _parse_hierarchy(self):
@@ -149,31 +160,50 @@
"""
self.max_level = self.parameter_file._max_level
header_file = open(self.header_filename,'r')
- # We can now skip to the point in the file we want to start parsing at.
+
+ self.dimensionality = self.parameter_file.dimensionality
+ _our_dim_finder = _dim_finder[self.dimensionality-1]
+ DRE = self.parameter_file.domain_right_edge # shortcut
+ DLE = self.parameter_file.domain_left_edge # shortcut
+
+ # We can now skip to the point in the file we want to start parsing.
header_file.seek(self.parameter_file._header_mesh_start)
dx = []
for i in range(self.max_level + 1):
dx.append([float(v) for v in header_file.next().split()])
+ # account for non-3d data sets
+ if self.dimensionality < 2:
+ dx[i].append(DRE[1] - DLE[1])
+ if self.dimensionality < 3:
+ dx[i].append(DRE[2] - DLE[2])
self.level_dds = np.array(dx, dtype="float64")
if int(header_file.next()) != 0:
raise RunTimeError("yt only supports cartesian coordinates.")
if int(header_file.next()) != 0:
raise RunTimeError("INTERNAL ERROR! This should be a zero.")
- # each level is one group with ngrids on it. each grid has 3 lines of 2 reals
+ # each level is one group with ngrids on it.
+ # each grid has self.dimensionality number of lines of 2 reals
self.grids = []
grid_counter = 0
for level in range(self.max_level + 1):
vals = header_file.next().split()
- # should this be grid_time or level_time??
- lev, ngrids, grid_time = int(vals[0]),int(vals[1]),float(vals[2])
+ lev, ngrids, cur_time = int(vals[0]),int(vals[1]),float(vals[2])
assert(lev == level)
nsteps = int(header_file.next())
for gi in range(ngrids):
xlo, xhi = [float(v) for v in header_file.next().split()]
- ylo, yhi = [float(v) for v in header_file.next().split()]
- zlo, zhi = [float(v) for v in header_file.next().split()]
+ if self.dimensionality > 1:
+ ylo, yhi = [float(v) for v in header_file.next().split()]
+ else:
+# ylo, yhi = DLE[1], DRE[1]
+ ylo, yhi = 0.0, 1.0
+ if self.dimensionality > 2:
+ zlo, zhi = [float(v) for v in header_file.next().split()]
+ else:
+# zlo, zhi = DLE[2], DRE[2]
+ zlo, zhi = 0.0, 1.0
self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
# Now we get to the level header filename, which we open and parse.
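For a 2-d plotfile the header supplies only dx and dy per level, so the hunk above pads the missing z spacing with the width of the artificial third dimension. A standalone sketch with made-up numbers (DLE/DRE here are the already-padded domain edges):

    import numpy as np
    DLE = np.array([0.0, 0.0, 0.0])
    DRE = np.array([1.0, 1.0, 1.0])
    dx = [[0.01, 0.01]]               # spacings as read from a 2-d header
    dimensionality = 2
    if dimensionality < 3:
        dx[0].append(DRE[2] - DLE[2]) # pad z with the full (fake) domain width
    level_dds = np.array(dx, dtype="float64")
    print level_dds                   # -> [[ 0.01  0.01  1.  ]]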
@@ -181,14 +211,13 @@
header_file.next().strip())
level_header_file = open(fn + "_H")
level_dir = os.path.dirname(fn)
- # We skip the first two lines, although they probably contain
- # useful information I don't know how to decipher.
+ # We skip the first two lines, which contain BoxLib header file
+ # version and 'how' the data was written
level_header_file.next()
level_header_file.next()
- # Now we get the number of data files
+ # Now we get the number of components
ncomp_this_file = int(level_header_file.next())
- # Skip the next line, and we should get the number of grids / FABs
- # in this file
+ # Skip the next line, which contains the number of ghost zones
level_header_file.next()
# To decipher this next line, we expect something like:
# (8 0
@@ -197,7 +226,10 @@
# Now we can iterate over each and get the indices.
for gi in range(ngrids):
# components within it
- start, stop = _dim_finder.match(level_header_file.next()).groups()
+ start, stop = _our_dim_finder.match(level_header_file.next()).groups()
+ # fix for non-3d data
+ start += ',0'*(3-self.dimensionality)
+ stop += ',1'*(3-self.dimensionality)
start = np.array(start.split(","), dtype="int64")
stop = np.array(stop.split(","), dtype="int64")
dims = stop - start + 1
@@ -206,8 +238,7 @@
# Now we read two more lines. The first of these is a close
# parenthesis.
level_header_file.next()
- # This line I'm not 100% sure of, but it's either number of grids
- # or number of FABfiles.
+ # The next is again the number of grids
level_header_file.next()
# Now we iterate over grids to find their offsets in each file.
for gi in range(ngrids):
@@ -235,7 +266,7 @@
header = f.readline()
bpr, endian, start, stop, centering, nc = \
- _header_pattern.search(header).groups()
+ _header_pattern[self.dimensionality-1].search(header).groups()
# Note that previously we were using a different value for BPR than we
# use now. Here is an example set of information directly from BoxLib:
# * DOUBLE data
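As a sketch of what the list indexing buys us, the 2-d pattern would decompose a FAB header like the following (the header string is fabricated, in the BoxLib format):

    import re
    _endian_regex = r"^FAB \(\(\d+, \([0-9 ]+\)\),\((\d+), \(([0-9 ]+)\)\)\)"
    _header_2d = re.compile(_endian_regex +
                            r"\(\((\d+,\d+)\) \((\d+,\d+)\) \((\d+,\d+)\)\) (\d+)\n")
    header = ("FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (8 7 6 5 4 3 2 1)))"
              "((0,0) (127,127) (0,0)) 1\n")
    bpr, endian, start, stop, centering, nc = _header_2d.search(header).groups()
    print bpr, start, stop, nc   # -> 8 0,0 127,127 1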
@@ -494,8 +525,8 @@
for i in range(n_fields)]
self.dimensionality = int(header_file.readline())
- if self.dimensionality != 3:
- raise RunTimeError("Boxlib 1D and 2D support not currently available.")
+# if self.dimensionality != 3:
+# raise RunTimeError("Boxlib 1D and 2D support not currently available.")
self.current_time = float(header_file.readline())
# This is traditionally a hierarchy attribute, so we will set it, but
# in a slightly hidden variable.
@@ -546,6 +577,40 @@
header_file.readline()
self._header_mesh_start = header_file.tell()
+ # overrides for 1/2-dimensional data
+ if self.dimensionality == 1:
+ self._setup1d()
+ elif self.dimensionality == 2:
+ self._setup2d()
+
+ def _setup1d(self):
+# self._hierarchy_class = BoxlibHierarchy1D
+# self._fieldinfo_fallback = Orion1DFieldInfo
+ self.domain_left_edge = \
+ np.concatenate([self.domain_left_edge, [0.0, 0.0]])
+ self.domain_right_edge = \
+ np.concatenate([self.domain_right_edge, [1.0, 1.0]])
+ tmp = self.domain_dimensions.tolist()
+ tmp.extend((1,1))
+ self.domain_dimensions = np.array(tmp)
+ tmp = list(self.periodicity)
+ tmp[1:] = [False, False]
+ self.periodicity = ensure_tuple(tmp)
+
+ def _setup2d(self):
+# self._hierarchy_class = BoxlibHierarchy2D
+# self._fieldinfo_fallback = Orion2DFieldInfo
+ self.domain_left_edge = \
+ np.concatenate([self.domain_left_edge, [0.0]])
+ self.domain_right_edge = \
+ np.concatenate([self.domain_right_edge, [1.0]])
+ tmp = self.domain_dimensions.tolist()
+ tmp.append(1)
+ self.domain_dimensions = np.array(tmp)
+ tmp = list(self.periodicity)
+ tmp[2] = False
+ self.periodicity = ensure_tuple(tmp)
+
def _set_units(self):
"""
Generates the conversion to various physical _units based on the parameter file
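The net effect of _setup2d is to promote a 2-d plotfile to three components with a fake, one-cell-thick, non-periodic z axis. A self-contained sketch with invented values (a plain tuple() stands in for yt's ensure_tuple helper):

    import numpy as np
    domain_left_edge = np.array([0.0, 0.0])
    domain_right_edge = np.array([1.0, 2.0])
    domain_dimensions = np.array([128, 256])
    periodicity = (True, True, True)

    domain_left_edge = np.concatenate([domain_left_edge, [0.0]])
    domain_right_edge = np.concatenate([domain_right_edge, [1.0]])
    tmp = domain_dimensions.tolist()
    tmp.append(1)
    domain_dimensions = np.array(tmp)   # -> [128 256   1]
    tmp = list(periodicity)
    tmp[2] = False
    periodicity = tuple(tmp)            # -> (True, True, False)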
https://bitbucket.org/yt_analysis/yt-3.0/commits/fe4cb143777c/
Changeset: fe4cb143777c
Branch: yt-3.0
User: ChrisMalone
Date: 2013-11-21 23:15:03
Summary: the culprit was the grid_dimensions definition
Affected #: 1 file
diff -r e04142fa31befe213b454ae87e998ca7ea46db23 -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 yt/frontends/boxlib/data_structures.py
--- a/yt/frontends/boxlib/data_structures.py
+++ b/yt/frontends/boxlib/data_structures.py
@@ -197,12 +197,10 @@
if self.dimensionality > 1:
ylo, yhi = [float(v) for v in header_file.next().split()]
else:
-# ylo, yhi = DLE[1], DRE[1]
ylo, yhi = 0.0, 1.0
if self.dimensionality > 2:
zlo, zhi = [float(v) for v in header_file.next().split()]
else:
-# zlo, zhi = DLE[2], DRE[2]
zlo, zhi = 0.0, 1.0
self.grid_left_edge[grid_counter + gi, :] = [xlo, ylo, zlo]
self.grid_right_edge[grid_counter + gi, :] = [xhi, yhi, zhi]
@@ -227,9 +225,10 @@
for gi in range(ngrids):
# components within it
start, stop = _our_dim_finder.match(level_header_file.next()).groups()
- # fix for non-3d data
+ # fix for non-3d data
+ # note we append '0' to both ends b/c of the '+1' in dims below
start += ',0'*(3-self.dimensionality)
- stop += ',1'*(3-self.dimensionality)
+ stop += ',0'*(3-self.dimensionality)
start = np.array(start.split(","), dtype="int64")
stop = np.array(stop.split(","), dtype="int64")
dims = stop - start + 1
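Worked through for a 2-d grid whose Cell_H line reads "((0,0) (127,127) (0,0))", the corrected padding gives a one-cell-thick third dimension:

    import numpy as np
    start, stop = "0,0", "127,127"     # groups from _our_dim_finder
    start += ',0' * (3 - 2)            # -> "0,0,0"
    stop += ',0' * (3 - 2)             # -> "127,127,0"
    start = np.array(start.split(","), dtype="int64")
    stop = np.array(stop.split(","), dtype="int64")
    print stop - start + 1             # -> [128 128   1]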
@@ -525,8 +524,6 @@
for i in range(n_fields)]
self.dimensionality = int(header_file.readline())
-# if self.dimensionality != 3:
-# raise RunTimeError("Boxlib 1D and 2D support not currently available.")
self.current_time = float(header_file.readline())
# This is traditionally a hierarchy attribute, so we will set it, but
# in a slightly hidden variable.
https://bitbucket.org/yt_analysis/yt-3.0/commits/a788d5a847d9/
Changeset: a788d5a847d9
Branch: yt-3.0
User: ChrisMalone
Date: 2013-11-21 23:24:21
Summary: Merged yt_analysis/yt-3.0 into yt-3.0
Affected #: 66 files
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
include distribute_setup.py README* CREDITS COPYING.txt CITATION
recursive-include yt/gui/reason/html *.html *.png *.ico *.js
recursive-include yt *.pyx *.pxd *.h README*
-recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE kdtree2-README
+recursive-include yt/utilities/kdtree *.f90 *.v Makefile LICENSE
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 README
--- a/README
+++ b/README
@@ -1,11 +1,12 @@
-Hi there! You've just downloaded yt, an analysis tool for astrophysical
-simulation datasets, generated by simulation platforms like Enzo, Orion, FLASH,
-Nyx, MAESTRO, ART and Ramses. It's written in python and heavily leverages
-both NumPy and Matplotlib for fast arrays and visualization, respectively.
+Hi there! You've just downloaded yt, an analysis tool for scientific
+datasets, generated on a variety of data platforms. It's written in
+python and heavily leverages both NumPy and Matplotlib for fast arrays and
+visualization, respectively.
Full documentation and a user community can be found at:
http://yt-project.org/
+
http://yt-project.org/doc/
If you have used Python before, and are comfortable with installing packages,
@@ -16,9 +17,7 @@
doc/install_script.sh . You will have to set the destination directory, and
there are options available, but it should be straightforward.
-In case of any problems, please email the yt-users mailing list, and if you're
-interested in helping out, see the developer documentation:
-
-http://yt-project.org/doc/advanced/developing.html
+For more information on installation, what to do if you run into problems, or
+ways to help development, please visit our website.
Enjoy!
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 doc/install_script.sh
--- a/doc/install_script.sh
+++ b/doc/install_script.sh
@@ -555,6 +555,11 @@
echo 'de6d8c6ea849f0206d219303329a0276b3cce7c051eec34377d42aacbe0a4f47ac5145eb08966a338ecddd2b83c8f787ca9956508ad5c39ee2088ad875166410 xray_emissivity.h5' > xray_emissivity.h5.sha512
get_ytdata xray_emissivity.h5
+# Set paths to what they should be when yt is activated.
+export PATH=${DEST_DIR}/bin:$PATH
+export LD_LIBRARY_PATH=${DEST_DIR}/lib:$LD_LIBRARY_PATH
+export PYTHONPATH=${DEST_DIR}/lib/python2.7/site-packages
+
mkdir -p ${DEST_DIR}/src
cd ${DEST_DIR}/src
@@ -918,6 +923,8 @@
do_setup_py $SYMPY
[ $INST_PYX -eq 1 ] && do_setup_py $PYX
+( ${DEST_DIR}/bin/pip install jinja2 2>&1 ) 1>> ${LOG_FILE}
+
# Now we build Rockstar and set its environment variable.
if [ $INST_ROCKSTAR -eq 1 ]
then
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum_fit.py
@@ -86,6 +86,10 @@
#Empty fit without any lines
yFit = na.ones(len(fluxData))
+ #Force the first and last flux pixel to be 1 to prevent OOB
+ fluxData[0]=1
+ fluxData[-1]=1
+
#Find all regions where lines/groups of lines are present
cBounds = _find_complexes(x, fluxData, fitLim=fitLim,
complexLim=complexLim, minLength=minLength,
@@ -120,9 +124,10 @@
z,fitLim,minError*(b[2]-b[1]),speciesDict)
#Check existence of partner lines if applicable
- newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
- b, minError*(b[2]-b[1]),
- x0, xRes, speciesDict)
+ if len(speciesDict['wavelength']) != 1:
+ newLinesP = _remove_unaccepted_partners(newLinesP, x, fluxData,
+ b, minError*(b[2]-b[1]),
+ x0, xRes, speciesDict)
#If flagged as a bad fit, species is lyman alpha,
# and it may be a saturated line, use special tools
@@ -548,6 +553,10 @@
#Index of the redshifted wavelength
indexRedWl = (redWl-x0)/xRes
+ #Check to see if even in flux range
+ if indexRedWl >= len(y):
+ return False
+
#Check if surpasses minimum absorption bound
if y[int(indexRedWl)]>fluxMin:
return False
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/api.py
--- a/yt/analysis_modules/api.py
+++ b/yt/analysis_modules/api.py
@@ -108,3 +108,16 @@
from .radmc3d_export.api import \
RadMC3DWriter
+from .particle_trajectories.api import \
+ ParticleTrajectories
+
+from .photon_simulator.api import \
+ PhotonList, \
+ EventList, \
+ SpectralModel, \
+ XSpecThermalModel, \
+ XSpecAbsorbModel, \
+ TableApecModel, \
+ TableAbsorbModel, \
+ PhotonModel, \
+ ThermalPhotonModel
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/cosmological_observation/cosmology_splice.py
--- a/yt/analysis_modules/cosmological_observation/cosmology_splice.py
+++ b/yt/analysis_modules/cosmological_observation/cosmology_splice.py
@@ -113,7 +113,18 @@
self._calculate_deltaz_min(deltaz_min=deltaz_min)
cosmology_splice = []
-
+
+ if near_redshift == far_redshift:
+ self.simulation.get_time_series(redshifts=[near_redshift])
+ cosmology_splice.append({'time': self.simulation[0].current_time,
+ 'redshift': self.simulation[0].current_redshift,
+ 'filename': os.path.join(self.simulation[0].fullpath,
+ self.simulation[0].basename),
+ 'next': None})
+ mylog.info("create_cosmology_splice: Using %s for z = %f ." %
+ (cosmology_splice[0]['filename'], near_redshift))
+ return cosmology_splice
+
# Use minimum number of datasets to go from z_i to z_f.
if minimal:
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -28,6 +28,9 @@
only_on_root, \
parallel_objects, \
parallel_root_only
+from yt.utilities.physical_constants import \
+ speed_of_light_cgs, \
+ cm_per_km
class LightRay(CosmologySplice):
"""
@@ -51,7 +54,9 @@
near_redshift : float
The near (lowest) redshift for the light ray.
far_redshift : float
- The far (highest) redshift for the light ray.
+ The far (highest) redshift for the light ray. NOTE: in order
+ to use only a single dataset in a light ray, set the
+ near_redshift and far_redshift to be the same.
use_minimum_datasets : bool
If True, the minimum number of datasets is used to connect the
initial and final redshift. If false, the light ray solution
@@ -111,65 +116,92 @@
time_data=time_data,
redshift_data=redshift_data)
- def _calculate_light_ray_solution(self, seed=None, filename=None):
+ def _calculate_light_ray_solution(self, seed=None,
+ start_position=None, end_position=None,
+ trajectory=None, filename=None):
"Create list of datasets to be added together to make the light ray."
# Calculate dataset sizes, and get random dataset axes and centers.
np.random.seed(seed)
- # For box coherence, keep track of effective depth travelled.
- box_fraction_used = 0.0
+ # If using only one dataset, set start and stop manually.
+ if start_position is not None:
+ if len(self.light_ray_solution) > 1:
+ raise RuntimeError("LightRay Error: cannot specify start_position if light ray uses more than one dataset.")
+ if not ((end_position is None) ^ (trajectory is None)):
+ raise RuntimeError("LightRay Error: must specify either end_position or trajectory, but not both.")
+ self.light_ray_solution[0]['start'] = np.array(start_position)
+ if end_position is not None:
+ self.light_ray_solution[0]['end'] = np.array(end_position)
+ else:
+ # assume trajectory given as r, theta, phi
+ if len(trajectory) != 3:
+ raise RuntimeError("LightRay Error: trajectory must have lenght 3.")
+ r, theta, phi = trajectory
+ self.light_ray_solution[0]['end'] = self.light_ray_solution[0]['start'] + \
+ r * np.array([np.cos(phi) * np.sin(theta),
+ np.sin(phi) * np.sin(theta),
+ np.cos(theta)])
+ self.light_ray_solution[0]['traversal_box_fraction'] = \
+ vector_length(self.light_ray_solution[0]['start'],
+ self.light_ray_solution[0]['end'])
- for q in range(len(self.light_ray_solution)):
- if (q == len(self.light_ray_solution) - 1):
- z_next = self.near_redshift
- else:
- z_next = self.light_ray_solution[q+1]['redshift']
+ # the normal way (random start positions and trajectories for each dataset)
+ else:
+
+ # For box coherence, keep track of effective depth travelled.
+ box_fraction_used = 0.0
- # Calculate fraction of box required for a depth of delta z
- self.light_ray_solution[q]['traversal_box_fraction'] = \
- self.cosmology.ComovingRadialDistance(\
- z_next, self.light_ray_solution[q]['redshift']) * \
- self.simulation.hubble_constant / \
- self.simulation.box_size
+ for q in range(len(self.light_ray_solution)):
+ if (q == len(self.light_ray_solution) - 1):
+ z_next = self.near_redshift
+ else:
+ z_next = self.light_ray_solution[q+1]['redshift']
- # Simple error check to make sure more than 100% of box depth
- # is never required.
- if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
- mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
- (self.light_ray_solution[q]['redshift'], z_next,
- self.light_ray_solution[q]['traversal_box_fraction']))
- mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
- (self.light_ray_solution[q]['deltazMax'],
- self.light_ray_solution[q]['redshift']-z_next))
+ # Calculate fraction of box required for a depth of delta z
+ self.light_ray_solution[q]['traversal_box_fraction'] = \
+ self.cosmology.ComovingRadialDistance(\
+ z_next, self.light_ray_solution[q]['redshift']) * \
+ self.simulation.hubble_constant / \
+ self.simulation.box_size
- # Get dataset axis and center.
- # If using box coherence, only get start point and vector if
- # enough of the box has been used,
- # or if box_fraction_used will be greater than 1 after this slice.
- if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
- (box_fraction_used >
- self.minimum_coherent_box_fraction) or \
- (box_fraction_used +
- self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
- # Random start point
- self.light_ray_solution[q]['start'] = np.random.random(3)
- theta = np.pi * np.random.random()
- phi = 2 * np.pi * np.random.random()
- box_fraction_used = 0.0
- else:
- # Use end point of previous segment and same theta and phi.
- self.light_ray_solution[q]['start'] = \
- self.light_ray_solution[q-1]['end'][:]
+ # Simple error check to make sure more than 100% of box depth
+ # is never required.
+ if (self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+ mylog.error("Warning: box fraction required to go from z = %f to %f is %f" %
+ (self.light_ray_solution[q]['redshift'], z_next,
+ self.light_ray_solution[q]['traversal_box_fraction']))
+ mylog.error("Full box delta z is %f, but it is %f to the next data dump." %
+ (self.light_ray_solution[q]['deltazMax'],
+ self.light_ray_solution[q]['redshift']-z_next))
- self.light_ray_solution[q]['end'] = \
- self.light_ray_solution[q]['start'] + \
- self.light_ray_solution[q]['traversal_box_fraction'] * \
- np.array([np.cos(phi) * np.sin(theta),
- np.sin(phi) * np.sin(theta),
- np.cos(theta)])
- box_fraction_used += \
- self.light_ray_solution[q]['traversal_box_fraction']
+ # Get dataset axis and center.
+ # If using box coherence, only get start point and vector if
+ # enough of the box has been used,
+ # or if box_fraction_used will be greater than 1 after this slice.
+ if (q == 0) or (self.minimum_coherent_box_fraction == 0) or \
+ (box_fraction_used >
+ self.minimum_coherent_box_fraction) or \
+ (box_fraction_used +
+ self.light_ray_solution[q]['traversal_box_fraction'] > 1.0):
+ # Random start point
+ self.light_ray_solution[q]['start'] = np.random.random(3)
+ theta = np.pi * np.random.random()
+ phi = 2 * np.pi * np.random.random()
+ box_fraction_used = 0.0
+ else:
+ # Use end point of previous segment and same theta and phi.
+ self.light_ray_solution[q]['start'] = \
+ self.light_ray_solution[q-1]['end'][:]
+
+ self.light_ray_solution[q]['end'] = \
+ self.light_ray_solution[q]['start'] + \
+ self.light_ray_solution[q]['traversal_box_fraction'] * \
+ np.array([np.cos(phi) * np.sin(theta),
+ np.sin(phi) * np.sin(theta),
+ np.cos(theta)])
+ box_fraction_used += \
+ self.light_ray_solution[q]['traversal_box_fraction']
if filename is not None:
self._write_light_ray_solution(filename,
@@ -178,7 +210,10 @@
'far_redshift':self.far_redshift,
'near_redshift':self.near_redshift})
- def make_light_ray(self, seed=None, fields=None,
+ def make_light_ray(self, seed=None,
+ start_position=None, end_position=None,
+ trajectory=None,
+ fields=None,
solution_filename=None, data_filename=None,
get_los_velocity=False,
get_nearest_halo=False,
@@ -197,6 +232,19 @@
seed : int
Seed for the random number generator.
Default: None.
+ start_position : list of floats
+ Used only if creating a light ray from a single dataset.
+ The coordinates of the starting position of the ray.
+ Default: None.
+ end_position : list of floats
+ Used only if creating a light ray from a single dataset.
+ The coordinates of the ending position of the ray.
+ Default: None.
+ trajectory : list of floats
+ Used only if creating a light ray from a single dataset.
+ The (r, theta, phi) direction of the light ray. Use either
+ end_position or trajectory, not both.
+ Default: None.
fields : list
A list of fields for which to get data.
Default: None.
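The trajectory parameter documented above is converted to an end point by a plain spherical-to-Cartesian offset from start_position. In isolation, with made-up numbers:

    import numpy as np
    start = np.array([0.5, 0.5, 0.5])
    r, theta, phi = 0.25, np.pi / 2, 0.0   # fabricated (r, theta, phi)
    end = start + r * np.array([np.cos(phi) * np.sin(theta),
                                np.sin(phi) * np.sin(theta),
                                np.cos(theta)])
    print end   # -> approximately [ 0.75  0.5   0.5 ]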
@@ -313,7 +361,11 @@
nearest_halo_fields = []
# Calculate solution.
- self._calculate_light_ray_solution(seed=seed, filename=solution_filename)
+ self._calculate_light_ray_solution(seed=seed,
+ start_position=start_position,
+ end_position=end_position,
+ trajectory=trajectory,
+ filename=solution_filename)
# Initialize data structures.
self._data = {}
@@ -335,9 +387,18 @@
for my_storage, my_segment in parallel_objects(self.light_ray_solution,
storage=all_ray_storage,
njobs=njobs, dynamic=dynamic):
- mylog.info("Creating ray segment at z = %f." %
- my_segment['redshift'])
- if my_segment['next'] is None:
+
+ # Load dataset for segment.
+ pf = load(my_segment['filename'])
+
+ if self.near_redshift == self.far_redshift:
+ h_vel = cm_per_km * pf.units['mpc'] * \
+ vector_length(my_segment['start'], my_segment['end']) * \
+ self.cosmology.HubbleConstantNow * \
+ self.cosmology.ExpansionFactor(my_segment['redshift'])
+ next_redshift = np.sqrt((1. + h_vel / speed_of_light_cgs) /
+ (1. - h_vel / speed_of_light_cgs)) - 1.
+ elif my_segment['next'] is None:
next_redshift = self.near_redshift
else:
next_redshift = my_segment['next']['redshift']
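In the single-dataset branch above, the segment's end redshift comes from the Hubble flow across the ray length via the relativistic Doppler relation z = sqrt((1 + v/c)/(1 - v/c)) - 1. Stripped of the yt bookkeeping, with an invented velocity:

    import numpy as np
    c = 2.99792458e10   # speed of light [cm/s]
    h_vel = 1.0e8       # Hubble-flow velocity across the segment [cm/s]
    print np.sqrt((1. + h_vel / c) / (1. - h_vel / c)) - 1.   # -> ~0.0033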
@@ -346,9 +407,6 @@
(my_segment['redshift'], my_segment['start'],
my_segment['end']))
- # Load dataset for segment.
- pf = load(my_segment['filename'])
-
# Break periodic ray into non-periodic segments.
sub_segments = periodic_ray(my_segment['start'], my_segment['end'])
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/api.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/api.py
@@ -0,0 +1,12 @@
+"""
+API for particle_trajectories
+"""
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from particle_trajectories import ParticleTrajectories
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/particle_trajectories.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/particle_trajectories.py
@@ -0,0 +1,329 @@
+"""
+Particle trajectories
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from yt.data_objects.data_containers import YTFieldData
+from yt.data_objects.time_series import TimeSeriesData
+from yt.utilities.lib import CICSample_3
+from yt.funcs import *
+
+import numpy as np
+import h5py
+
+class ParticleTrajectories(object):
+ r"""A collection of particle trajectories in time over a series of
+ parameter files.
+
+ The ParticleTrajectories object contains a collection of
+ particle trajectories for a specified set of particle indices.
+
+ Parameters
+ ----------
+ filenames : list of strings
+ A time-sorted list of filenames to construct the TimeSeriesData
+ object.
+ indices : array_like
+ An integer array of particle indices whose trajectories we
+ want to track. If they are not sorted, they will be sorted in place.
+ fields : list of strings, optional
+ A set of fields that is retrieved when the trajectory
+ collection is instantiated.
+ Default : None (will default to the fields 'particle_position_x',
+ 'particle_position_y', 'particle_position_z')
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> my_fns = glob.glob("orbit_hdf5_chk_00[0-9][0-9]")
+ >>> my_fns.sort()
+ >>> fields = ["particle_position_x", "particle_position_y",
+ >>> "particle_position_z", "particle_velocity_x",
+ >>> "particle_velocity_y", "particle_velocity_z"]
+ >>> pf = load(my_fns[0])
+ >>> init_sphere = pf.h.sphere(pf.domain_center, (.5, "unitary"))
+ >>> indices = init_sphere["particle_index"].astype("int")
+ >>> trajs = ParticleTrajectories(my_fns, indices, fields=fields)
+ >>> for t in trajs :
+ >>> print t["particle_velocity_x"].max(), t["particle_velocity_x"].min()
+
+ Notes
+ -----
+ As of this time only particle trajectories that are complete over the
+ set of specified parameter files are supported. If any particle's history
+ ends for some reason (e.g. leaving the simulation domain or being actively
+ destroyed), the whole trajectory collection of which it is a member must
+ end at or before the particle's last timestep. This is a limitation we hope to
+ lift at some point in the future.
+ """
+ def __init__(self, filenames, indices, fields=None) :
+
+ indices.sort() # Just in case the caller wasn't careful
+
+ self.field_data = YTFieldData()
+ self.pfs = TimeSeriesData.from_filenames(filenames)
+ self.masks = []
+ self.sorts = []
+ self.indices = indices
+ self.num_indices = len(indices)
+ self.num_steps = len(filenames)
+ self.times = []
+
+ # Default fields
+
+ if fields is None: fields = []
+
+ # Must ALWAYS have these fields
+
+ fields = fields + ["particle_position_x",
+ "particle_position_y",
+ "particle_position_z"]
+
+ # Set up the derived field list and the particle field list
+ # so that if the requested field is a particle field, we'll
+ # just copy the field over, but if the field is a grid field,
+ # we will first interpolate the field to the particle positions
+ # and then return the field.
+
+ pf = self.pfs[0]
+ self.derived_field_list = pf.h.derived_field_list
+ self.particle_fields = [field for field in self.derived_field_list
+ if pf.field_info[field].particle_type]
+
+ """
+ The following loops through the parameter files
+ and performs two tasks. The first is to isolate
+ the particles with the correct indices, and the
+ second is to create a sorted list of these particles.
+ We also make a list of the current time from each file.
+ Right now, the code assumes (and checks for) the
+ particle indices existing in each dataset, a limitation I
+ would like to lift at some point since some codes
+ (e.g., FLASH) destroy particles leaving the domain.
+ """
+
+ for pf in self.pfs:
+ dd = pf.h.all_data()
+ newtags = dd["particle_index"].astype("int")
+ if not np.all(np.in1d(indices, newtags, assume_unique=True)):
+ print "Not all requested particle ids contained in this dataset!"
+ raise IndexError
+ mask = np.in1d(newtags, indices, assume_unique=True)
+ sorts = np.argsort(newtags[mask])
+ self.masks.append(mask)
+ self.sorts.append(sorts)
+ self.times.append(pf.current_time)
+
+ self.times = np.array(self.times)
+
+ # Now instantiate the requested fields
+ for field in fields:
+ self._get_data(field)
+
+ def has_key(self, key):
+ return (key in self.field_data)
+
+ def keys(self):
+ return self.field_data.keys()
+
+ def __getitem__(self, key):
+ """
+ Get the field associated with key,
+ checking to make sure it is a particle field.
+ """
+ if key == "particle_time":
+ return self.times
+ if not self.field_data.has_key(key):
+ self._get_data(key)
+ return self.field_data[key]
+
+ def __setitem__(self, key, val):
+ """
+ Sets a field to be some other value.
+ """
+ self.field_data[key] = val
+
+ def __delitem__(self, key):
+ """
+ Delete the field from the trajectory
+ """
+ del self.field_data[key]
+
+ def __iter__(self):
+ """
+ This iterates over the trajectories for
+ the different particles, returning dicts
+ of fields for each trajectory
+ """
+ for idx in xrange(self.num_indices):
+ traj = {}
+ traj["particle_index"] = self.indices[idx]
+ traj["particle_time"] = self.times
+ for field in self.field_data.keys():
+ traj[field] = self[field][idx,:]
+ yield traj
+
+ def __len__(self):
+ """
+ The number of individual trajectories
+ """
+ return self.num_indices
+
+ def add_fields(self, fields):
+ """
+ Add a list of fields to an existing trajectory
+
+ Parameters
+ ----------
+ fields : list of strings
+ A list of fields to be added to the current trajectory
+ collection.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.add_fields(["particle_mass", "particle_gpot"])
+ """
+ for field in fields:
+ if not self.field_data.has_key(field):
+ self._get_data(field)
+
+ def _get_data(self, field):
+ """
+ Get a field to include in the trajectory collection.
+ The trajectory collection itself is a dict of 2D numpy arrays,
+ with shape (num_indices, num_steps)
+ """
+ if not self.field_data.has_key(field):
+ particles = np.empty((0))
+ step = int(0)
+ for pf, mask, sort in zip(self.pfs, self.masks, self.sorts):
+ if field in self.particle_fields:
+ # This is easy... just get the particle fields
+ dd = pf.h.all_data()
+ pfield = dd[field][mask]
+ particles = np.append(particles, pfield[sort])
+ else:
+ # This is hard... must loop over grids
+ pfield = np.zeros((self.num_indices))
+ x = self["particle_position_x"][:,step]
+ y = self["particle_position_y"][:,step]
+ z = self["particle_position_z"][:,step]
+ particle_grids, particle_grid_inds = pf.h.find_points(x,y,z)
+ for grid in particle_grids:
+ cube = grid.retrieve_ghost_zones(1, [field])
+ CICSample_3(x,y,z,pfield,
+ self.num_indices,
+ cube[field],
+ np.array(grid.LeftEdge).astype(np.float64),
+ np.array(grid.ActiveDimensions).astype(np.int32),
+ np.float64(grid['dx']))
+ particles = np.append(particles, pfield)
+ step += 1
+ self[field] = particles.reshape(self.num_steps,
+ self.num_indices).transpose()
+ return self.field_data[field]
+
+ def trajectory_from_index(self, index):
+ """
+ Retrieve a single trajectory corresponding to a specific particle
+ index
+
+ Parameters
+ ----------
+ index : int
+ This defines which particle trajectory from the
+ ParticleTrajectories object will be returned.
+
+ Returns
+ -------
+ A dictionary corresponding to the particle's trajectory and the
+ fields along that trajectory
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> import matplotlib.pylab as pl
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> traj = trajs.trajectory_from_index(indices[0])
+ >>> pl.plot(traj["particle_time"], traj["particle_position_x"], "-x")
+ >>> pl.savefig("orbit")
+ """
+ mask = np.in1d(self.indices, (index,), assume_unique=True)
+ if not np.any(mask):
+ print "The particle index %d is not in the list!" % (index)
+ raise IndexError
+ fields = [field for field in sorted(self.field_data.keys())]
+ traj = {}
+ traj["particle_time"] = self.times
+ traj["particle_index"] = index
+ for field in fields:
+ traj[field] = self[field][mask,:][0]
+ return traj
+
+ def write_out(self, filename_base):
+ """
+ Write out particle trajectories to tab-separated ASCII files (one
+ for each trajectory) with the field names in the file header. Each
+ file is named with a basename and the index number.
+
+ Parameters
+ ----------
+ filename_base : string
+ The prefix for the outputted ASCII files.
+
+ Examples
+ --------
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.write_out("orbit_trajectory")
+ """
+ fields = [field for field in sorted(self.field_data.keys())]
+ num_fields = len(fields)
+ first_str = "# particle_time\t" + "\t".join(fields)+"\n"
+ template_str = "%g\t"*num_fields+"%g\n"
+ for ix in xrange(self.num_indices):
+ outlines = [first_str]
+ for it in xrange(self.num_steps):
+ outlines.append(template_str %
+ tuple([self.times[it]]+[self[field][ix,it] for field in fields]))
+ fid = open(filename_base + "_%d.dat" % self.indices[ix], "w")
+ fid.writelines(outlines)
+ fid.close()
+ del fid
+
+ def write_out_h5(self, filename):
+ """
+ Write out all the particle trajectories to a single HDF5 file
+ that contains the indices, the times, and the 2D array for each
+ field individually
+
+ Parameters
+ ----------
+
+ filename : string
+ The output filename for the HDF5 file
+
+ Examples
+ --------
+
+ >>> from yt.mods import *
+ >>> trajs = ParticleTrajectories(my_fns, indices)
+ >>> trajs.write_out_h5("orbit_trajectories")
+ """
+ fid = h5py.File(filename, "w")
+ fields = [field for field in sorted(self.field_data.keys())]
+ fid.create_dataset("particle_indices", dtype=np.int32,
+ data=self.indices)
+ fid.create_dataset("particle_time", data=self.times)
+ for field in fields:
+ fid.create_dataset("%s" % field, data=self[field])
+ fid.close()
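The reshape/transpose in _get_data is what gives every stored field the (num_indices, num_steps) layout that __iter__ and trajectory_from_index slice along. Demonstrated in isolation with fabricated values:

    import numpy as np
    num_steps, num_indices = 3, 2
    # values appended in time order: [t0p0, t0p1, t1p0, t1p1, t2p0, t2p1]
    particles = np.array([0., 1., 10., 11., 20., 21.])
    field = particles.reshape(num_steps, num_indices).transpose()
    print field.shape   # -> (2, 3): one row per particle trajectory
    print field[0]      # -> [  0.  10.  20.]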
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/particle_trajectories/setup.py
--- /dev/null
+++ b/yt/analysis_modules/particle_trajectories/setup.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+import setuptools
+import os
+import sys
+import os.path
+
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('particle_trajectories', parent_package, top_path)
+ #config.add_subpackage("tests")
+ config.make_config_py() # installs __config__.py
+ #config.make_svn_version_py()
+ return config
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/photon_simulator/api.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/api.py
@@ -0,0 +1,26 @@
+"""
+API for yt.analysis_modules.photon_simulator.
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+from .photon_models import \
+ PhotonModel, \
+ ThermalPhotonModel
+
+from .photon_simulator import \
+ PhotonList, \
+ EventList
+
+from .spectral_models import \
+ SpectralModel, \
+ XSpecThermalModel, \
+ XSpecAbsorbModel, \
+ TableApecModel, \
+ TableAbsorbModel
diff -r fe4cb143777c277c815304efbc07e03b2d5e7dd2 -r a788d5a847d925ef1d57f639db889ca273b651e0 yt/analysis_modules/photon_simulator/photon_models.py
--- /dev/null
+++ b/yt/analysis_modules/photon_simulator/photon_models.py
@@ -0,0 +1,205 @@
+"""
+Classes for specific photon models
+
+The algorithms used here are based off of the method used by the
+PHOX code (http://www.mpa-garching.mpg.de/~kdolag/Phox/),
+developed by Veronica Biffi and Klaus Dolag. References for
+PHOX may be found at:
+
+Biffi, V., Dolag, K., Bohringer, H., & Lemson, G. 2012, MNRAS, 420, 3545
+http://adsabs.harvard.edu/abs/2012MNRAS.420.3545B
+
+Biffi, V., Dolag, K., Bohringer, H. 2013, MNRAS, 428, 1395
+http://adsabs.harvard.edu/abs/2013MNRAS.428.1395B
+
+"""
+
+#-----------------------------------------------------------------------------
+# Copyright (c) 2013, yt Development Team.
+#
+# Distributed under the terms of the Modified BSD License.
+#
+# The full license is in the file COPYING.txt, distributed with this software.
+#-----------------------------------------------------------------------------
+
+import numpy as np
+from yt.funcs import *
+from yt.utilities.physical_constants import \
+ mp, cm_per_km, K_per_keV, cm_per_mpc
+from yt.utilities.parallel_tools.parallel_analysis_interface import \
+ communication_system
+
+N_TBIN = 10000
+TMIN = 8.08e-2
+TMAX = 50.
+
+comm = communication_system.communicators[-1]
+
+class PhotonModel(object):
+
+ def __init__(self):
+ pass
+
+ def __call__(self, data_source, parameters):
+ photons = {}
+ return photons
+
+class ThermalPhotonModel(PhotonModel):
+ r"""
+ Initialize a ThermalPhotonModel from a thermal spectrum.
+
+ Parameters
+ ----------
+
+ spectral_model : `SpectralModel`
+ A thermal spectral model instance, either of `XSpecThermalModel`
+ or `TableApecModel`.
+ X_H : float, optional
+ The hydrogen mass fraction.
+ Zmet : float or string, optional
+ The metallicity. If a float, assumes a constant metallicity throughout.
+ If a string, is taken to be the name of the metallicity field.
+ """
+ def __init__(self, spectral_model, X_H=0.75, Zmet=0.3):
+ self.X_H = X_H
+ self.Zmet = Zmet
+ self.spectral_model = spectral_model
+
+ def __call__(self, data_source, parameters):
+
+ pf = data_source.pf
+
+ exp_time = parameters["FiducialExposureTime"]
+ area = parameters["FiducialArea"]
+ redshift = parameters["FiducialRedshift"]
+ D_A = parameters["FiducialAngularDiameterDistance"]*cm_per_mpc
+ dist_fac = 1.0/(4.*np.pi*D_A*D_A*(1.+redshift)**3)
+
+ vol_scale = pf.units["cm"]**(-3)/np.prod(pf.domain_width)
+
+ num_cells = data_source["Temperature"].shape[0]
+ start_c = comm.rank*num_cells/comm.size
+ end_c = (comm.rank+1)*num_cells/comm.size
+
+ kT = data_source["Temperature"][start_c:end_c].copy()/K_per_keV
+ vol = data_source["CellVolume"][start_c:end_c].copy()
+ dx = data_source["dx"][start_c:end_c].copy()
+ EM = (data_source["Density"][start_c:end_c].copy()/mp)**2
+ EM *= 0.5*(1.+self.X_H)*self.X_H*vol
+
+ data_source.clear_data()
+
+ x = data_source["x"][start_c:end_c].copy()
+ y = data_source["y"][start_c:end_c].copy()
+ z = data_source["z"][start_c:end_c].copy()
+
+ data_source.clear_data()
+
+ vx = data_source["x-velocity"][start_c:end_c].copy()
+ vy = data_source["y-velocity"][start_c:end_c].copy()
+ vz = data_source["z-velocity"][start_c:end_c].copy()
+
+ if isinstance(self.Zmet, basestring):
+ metalZ = data_source[self.Zmet][start_c:end_c].copy()
+ else:
+ metalZ = self.Zmet*np.ones(EM.shape)
+
+ data_source.clear_data()
+
+ idxs = np.argsort(kT)
+ dshape = idxs.shape
+
+ kT_bins = np.linspace(TMIN, max(kT[idxs][-1], TMAX), num=N_TBIN+1)
+ dkT = kT_bins[1]-kT_bins[0]
+ kT_idxs = np.digitize(kT[idxs], kT_bins)
+ kT_idxs = np.minimum(np.maximum(1, kT_idxs), N_TBIN) - 1
+ bcounts = np.bincount(kT_idxs).astype("int")
+ bcounts = bcounts[bcounts > 0]
+ n = int(0)
+ bcell = []
+ ecell = []
+ for bcount in bcounts:
+ bcell.append(n)
+ ecell.append(n+bcount)
+ n += bcount
+ kT_idxs = np.unique(kT_idxs)
+
+ self.spectral_model.prepare()
+ energy = self.spectral_model.ebins
+
+ cell_em = EM[idxs]*vol_scale
+ cell_vol = vol[idxs]*vol_scale
+
+ number_of_photons = np.zeros(dshape, dtype='uint64')
+ energies = []
+
+ u = np.random.random(cell_em.shape)
+
+ pbar = get_pbar("Generating Photons", dshape[0])
+
+ for i, ikT in enumerate(kT_idxs):
+
+ ncells = int(bcounts[i])
+ ibegin = bcell[i]
+ iend = ecell[i]
+ kT = kT_bins[ikT] + 0.5*dkT
+
+ em_sum_c = cell_em[ibegin:iend].sum()
+ em_sum_m = (metalZ[ibegin:iend]*cell_em[ibegin:iend]).sum()
+
+ cspec, mspec = self.spectral_model.get_spectrum(kT)
+ cspec *= dist_fac*em_sum_c/vol_scale
+ mspec *= dist_fac*em_sum_m/vol_scale
+
+ cumspec_c = np.cumsum(cspec)
+ counts_c = cumspec_c[:]/cumspec_c[-1]
+ counts_c = np.insert(counts_c, 0, 0.0)
+ tot_ph_c = cumspec_c[-1]*area*exp_time
+
+ cumspec_m = np.cumsum(mspec)
+ counts_m = cumspec_m[:]/cumspec_m[-1]
+ counts_m = np.insert(counts_m, 0, 0.0)
+ tot_ph_m = cumspec_m[-1]*area*exp_time
+
+ for icell in xrange(ibegin, iend):
+
+ cell_norm_c = tot_ph_c*cell_em[icell]/em_sum_c
+ cell_n_c = np.uint64(cell_norm_c) + np.uint64(np.modf(cell_norm_c)[0] >= u[icell])
+
+ cell_norm_m = tot_ph_m*metalZ[icell]*cell_em[icell]/em_sum_m
+ cell_n_m = np.uint64(cell_norm_m) + np.uint64(np.modf(cell_norm_m)[0] >= u[icell])
+
+ cell_n = cell_n_c + cell_n_m
+
+ if cell_n > 0:
+ number_of_photons[icell] = cell_n
+ randvec_c = np.random.uniform(size=cell_n_c)
+ randvec_c.sort()
+ randvec_m = np.random.uniform(size=cell_n_m)
+ randvec_m.sort()
+ cell_e_c = np.interp(randvec_c, counts_c, energy)
+ cell_e_m = np.interp(randvec_m, counts_m, energy)
+ energies.append(np.concatenate([cell_e_c,cell_e_m]))
+
+ pbar.update(icell)
+
+ pbar.finish()
+
+ active_cells = number_of_photons > 0
+ idxs = idxs[active_cells]
+
+ photons = {}
+
+ src_ctr = parameters["center"]
+
+ photons["x"] = (x[idxs]-src_ctr[0])*pf.units["kpc"]
+ photons["y"] = (y[idxs]-src_ctr[1])*pf.units["kpc"]
+ photons["z"] = (z[idxs]-src_ctr[2])*pf.units["kpc"]
+ photons["vx"] = vx[idxs]/cm_per_km
+ photons["vy"] = vy[idxs]/cm_per_km
+ photons["vz"] = vz[idxs]/cm_per_km
+ photons["dx"] = dx[idxs]*pf.units["kpc"]
+ photons["NumberOfPhotons"] = number_of_photons[active_cells]
+ photons["Energy"] = np.concatenate(energies)
+
+ return photons
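Going only by what this diff shows, the intended call pattern for the model is roughly the following sketch; spec_model and data_source are placeholders (the spectral-model constructors are not in this excerpt), and the parameter keys are the ones __call__ reads:

    from yt.analysis_modules.photon_simulator.api import ThermalPhotonModel

    # spec_model: a SpectralModel instance; data_source: e.g. a yt sphere
    thermal_model = ThermalPhotonModel(spec_model, X_H=0.75, Zmet=0.3)
    parameters = {"FiducialExposureTime": 1.0e5,            # [s]
                  "FiducialArea": 6000.0,                   # [cm^2]
                  "FiducialRedshift": 0.05,
                  "FiducialAngularDiameterDistance": 200.0, # [Mpc]
                  "center": [0.5, 0.5, 0.5]}
    photons = thermal_model(data_source, parameters)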
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/yt_analysis/yt-3.0/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.