[yt-svn] commit/yt: 4 new changesets
commits-noreply at bitbucket.org
Fri Sep 25 00:21:09 PDT 2015
4 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/af9d5d38e4e1/
Changeset: af9d5d38e4e1
Branch: yt
User: chummels
Date: 2015-09-18 01:27:51+00:00
Summary: Adding an effective redshift field to LightRay output fields; incorporates velocity into the cosmological redshift.
Affected #: 1 file
diff -r f818f29712491ce9f597decaea69297a06603393 -r af9d5d38e4e1634df054e4fafd827ced0762ed60 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -29,6 +29,7 @@
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_objects, \
parallel_root_only
+from yt.utilities.physical_constants import speed_of_light_cgs
class LightRay(CosmologySplice):
"""
@@ -365,7 +366,7 @@
all_fields.extend(['dl', 'dredshift', 'redshift'])
if get_los_velocity:
all_fields.extend(['velocity_x', 'velocity_y',
- 'velocity_z', 'velocity_los'])
+ 'velocity_z', 'velocity_los', 'redshift_eff'])
data_fields.extend(['velocity_x', 'velocity_y', 'velocity_z'])
all_ray_storage = {}
@@ -457,6 +458,20 @@
sub_data['redshift'] = my_segment['redshift'] - \
sub_data['dredshift'].cumsum() + sub_data['dredshift']
+ # When velocity_los is present, add redshift_eff field combining
+ # redshift (cosmological) and redshift (velocity_los):
+ # 1 + redshift_vel = sqrt((1+v/c) / (1-v/c))
+ # But velocity is in proper frame, so it needs to be multiplied
+ # by scale factor (1+z) to be put in comoving frame
+ if get_los_velocity:
+
+ velocity_los_cm = (1 + sub_data['redshift']) * \
+ sub_data['velocity_los']
+ redshift_eff = ((1 + velocity_los_cm / speed_of_light_cgs) /
+ (1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
+ sub_data['redshift_eff'] = redshift_eff + sub_data['redshift']
+ del velocity_los_cm, redshift_eff
+
# Remove empty lixels.
sub_dl_nonzero = sub_data['dl'].nonzero()
for field in all_fields:
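
For reference, the relativistic Doppler relation this changeset uses is 1 + z_dopp = sqrt((1 + v/c) / (1 - v/c)). Below is a minimal standalone sketch of the computation with plain NumPy and illustrative values; it is not the LightRay internals:

    import numpy as np

    c = 2.99792458e10                                # speed of light, cm/s
    redshift = np.array([0.50, 0.51, 0.52])          # cosmological redshift per lixel
    velocity_los = np.array([1.0e7, -2.0e7, 5.0e6])  # proper LOS velocity, cm/s

    # Multiply proper velocities by (1 + z) to move them to the comoving
    # frame, as the comment in the diff above describes.
    velocity_los_cm = (1 + redshift) * velocity_los

    # Relativistic Doppler redshift: 1 + z_dopp = sqrt((1 + v/c) / (1 - v/c))
    redshift_dopp = np.sqrt((1 + velocity_los_cm / c) /
                            (1 - velocity_los_cm / c)) - 1

    # This changeset adds the two redshifts directly; the third changeset
    # below replaces this sum with a multiplicative combination.
    redshift_eff = redshift_dopp + redshift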
https://bitbucket.org/yt_analysis/yt/commits/7ab67784cfd4/
Changeset: 7ab67784cfd4
Branch: yt
User: chummels
Date: 2015-09-18 01:42:42+00:00
Summary: Make absorption_spectrum utilize the effective redshift field in its calculation of the observed wavelength of lines and continua. This is a better approximation of the Doppler redshift than just v/c.
Affected #: 1 file
diff -r af9d5d38e4e1634df054e4fafd827ced0762ed60 -r 7ab67784cfd41bb6bb245c15f520fab9a07d7571 yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
--- a/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
+++ b/yt/analysis_modules/absorption_spectrum/absorption_spectrum.py
@@ -159,7 +159,9 @@
field_data = {}
if use_peculiar_velocity:
input_fields.append('velocity_los')
+ input_fields.append('redshift_eff')
field_units["velocity_los"] = "cm/s"
+ field_units["redshift_eff"] = ""
for feature in self.line_list + self.continuum_list:
if not feature['field_name'] in input_fields:
input_fields.append(feature['field_name'])
@@ -204,11 +206,11 @@
for continuum in self.continuum_list:
column_density = field_data[continuum['field_name']] * field_data['dl']
- delta_lambda = continuum['wavelength'] * field_data['redshift']
+ # redshift_eff field combines cosmological and velocity redshifts
if use_peculiar_velocity:
- # include factor of (1 + z) because our velocity is in proper frame.
- delta_lambda += continuum['wavelength'] * (1 + field_data['redshift']) * \
- field_data['velocity_los'] / speed_of_light_cgs
+ delta_lambda = continuum['wavelength'] * field_data['redshift_eff']
+ else:
+ delta_lambda = continuum['wavelength'] * field_data['redshift']
this_wavelength = delta_lambda + continuum['wavelength']
right_index = np.digitize(this_wavelength, self.lambda_bins).clip(0, self.n_lambda)
left_index = np.digitize((this_wavelength *
@@ -242,11 +244,11 @@
for line in parallel_objects(self.line_list, njobs=njobs):
column_density = field_data[line['field_name']] * field_data['dl']
- delta_lambda = line['wavelength'] * field_data['redshift']
+ # redshift_eff field combines cosmological and velocity redshifts
if use_peculiar_velocity:
- # include factor of (1 + z) because our velocity is in proper frame.
- delta_lambda += line['wavelength'] * (1 + field_data['redshift']) * \
- field_data['velocity_los'] / speed_of_light_cgs
+ delta_lambda = line['wavelength'] * field_data['redshift_eff']
+ else:
+ delta_lambda = line['wavelength'] * field_data['redshift']
thermal_b = np.sqrt((2 * boltzmann_constant_cgs *
field_data['temperature']) /
line['atomic_mass'])
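
Once redshift_eff carries both contributions, the observed wavelength of a feature is simply lambda_obs = lambda_rest * (1 + z_eff). A quick worked example with an assumed Lyman-alpha line; the numbers are illustrative and this is not the AbsorptionSpectrum API:

    wavelength = 1215.67   # rest wavelength in Angstroms (assumed feature)
    redshift_eff = 0.501   # combined cosmological + Doppler redshift

    # Mirrors the arithmetic in the diff above:
    delta_lambda = wavelength * redshift_eff
    this_wavelength = delta_lambda + wavelength   # == wavelength * (1 + z_eff)
    print(this_wavelength)                        # ~1824.72 Angstroms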
https://bitbucket.org/yt_analysis/yt/commits/87cf0225db27/
Changeset: 87cf0225db27
Branch: yt
User: chummels
Date: 2015-09-23 21:27:50+00:00
Summary: Correctly combine Doppler redshift and cosmological redshift when calculating the "effective" redshift in the LightRay analysis module.
Affected #: 1 file
diff -r 7ab67784cfd41bb6bb245c15f520fab9a07d7571 -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
--- a/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
+++ b/yt/analysis_modules/cosmological_observation/light_ray/light_ray.py
@@ -458,19 +458,27 @@
sub_data['redshift'] = my_segment['redshift'] - \
sub_data['dredshift'].cumsum() + sub_data['dredshift']
- # When velocity_los is present, add redshift_eff field combining
- # redshift (cosmological) and redshift (velocity_los):
- # 1 + redshift_vel = sqrt((1+v/c) / (1-v/c))
- # But velocity is in proper frame, so it needs to be multiplied
- # by scale factor (1+z) to be put in comoving frame
+ # When velocity_los is present, add effective redshift
+ # (redshift_eff) field by combining cosmological redshift and
+ # doppler redshift.
+
+ # first convert los velocities to comoving frame (ie mult. by (1+z)),
+ # then calculate doppler redshift:
+ # 1 + redshift_dopp = sqrt((1+v/c) / (1-v/c))
+
+ # then to add cosmological redshift and doppler redshift, follow
+ # eqn 3.75 in Peacock's Cosmological Physics:
+ # 1 + z_obs = (1 + z_cosmo) * (1 + z_doppler)
+ # Alternatively, see eqn 5.49 in Peebles for a similar result.
if get_los_velocity:
velocity_los_cm = (1 + sub_data['redshift']) * \
sub_data['velocity_los']
- redshift_eff = ((1 + velocity_los_cm / speed_of_light_cgs) /
+ redshift_dopp = ((1 + velocity_los_cm / speed_of_light_cgs) /
(1 - velocity_los_cm / speed_of_light_cgs))**(0.5) - 1
- sub_data['redshift_eff'] = redshift_eff + sub_data['redshift']
- del velocity_los_cm, redshift_eff
+ sub_data['redshift_eff'] = (1 + redshift_dopp) * \
+ (1 + sub_data['redshift'])
+ del velocity_los_cm, redshift_dopp
# Remove empty lixels.
sub_dl_nonzero = sub_data['dl'].nonzero()
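
The difference between the old additive form and the multiplicative rule quoted in the comments above, 1 + z_obs = (1 + z_cosmo)(1 + z_dopp) (eqn 3.75 in Peacock), is the cross term z_cosmo * z_dopp, which grows with both redshifts. A quick numerical check with assumed values:

    z_cosmo = 2.0    # cosmological redshift
    z_dopp = 0.001   # Doppler redshift for ~300 km/s along the sightline

    z_additive = z_cosmo + z_dopp                          # old: 2.001
    z_multiplicative = (1 + z_cosmo) * (1 + z_dopp) - 1    # new: 2.003

    # The discrepancy equals z_cosmo * z_dopp = 0.002 and is not
    # negligible for high-redshift absorption-line work.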
https://bitbucket.org/yt_analysis/yt/commits/b7afd79a9820/
Changeset: b7afd79a9820
Branch: yt
User: chummels
Date: 2015-09-23 21:28:23+00:00
Summary: Merging.
Affected #: 17 files
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 MANIFEST.in
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,10 +1,10 @@
-include distribute_setup.py README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
+include README* CREDITS COPYING.txt CITATION requirements.txt optional-requirements.txt
include yt/visualization/mapserver/html/map_index.html
include yt/visualization/mapserver/html/leaflet/*.css
include yt/visualization/mapserver/html/leaflet/*.js
include yt/visualization/mapserver/html/leaflet/images/*.png
recursive-include yt *.py *.pyx *.pxd *.h README* *.txt LICENSE* *.cu
-recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.inc *.html
+recursive-include doc *.rst *.txt *.py *.ipynb *.png *.jpg *.css *.html
recursive-include doc *.h *.c *.sh *.svgz *.pdf *.svg *.pyx
include doc/README doc/activate doc/activate.csh doc/cheatsheet.tex
include doc/extensions/README doc/Makefile
@@ -12,5 +12,3 @@
prune doc/build
recursive-include yt/analysis_modules/halo_finding/rockstar *.py *.pyx
prune yt/frontends/_skeleton
-prune tests
-exclude clean.sh .hgchurn
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/data_objects/grid_patch.py
--- a/yt/data_objects/grid_patch.py
+++ b/yt/data_objects/grid_patch.py
@@ -136,6 +136,8 @@
# that dx=dy=dz, at least here. We probably do elsewhere.
id = self.id - self._id_offset
if self.Parent is not None:
+ if not hasattr(self.Parent, 'dds'):
+ self.Parent._setup_dx()
self.dds = self.Parent.dds.ndarray_view() / self.ds.refine_by
else:
LE, RE = self.index.grid_left_edge[id,:], \
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/artio/data_structures.py
--- a/yt/frontends/artio/data_structures.py
+++ b/yt/frontends/artio/data_structures.py
@@ -178,7 +178,8 @@
"""
Returns (in code units) the smallest cell size in the simulation.
"""
- return 1.0/(2**self.max_level)
+ return (self.dataset.domain_width /
+ (self.dataset.domain_dimensions * 2**(self.max_level))).min()
def convert(self, unit):
return self.dataset.conversion_factors[unit]
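
The old expression assumed a single root cell in a unit domain; the corrected one accounts for the domain width and the root-grid dimensions. A worked example with assumed values:

    import numpy as np

    domain_width = np.array([1.0, 1.0, 1.0])        # code units (assumed)
    domain_dimensions = np.array([128, 128, 128])   # root-grid cells (assumed)
    max_level = 5

    # Corrected formula from the diff: the finest cell size over all axes
    dx_new = (domain_width / (domain_dimensions * 2**max_level)).min()

    # Old formula ignored the root grid entirely
    dx_old = 1.0 / 2**max_level

    print(dx_new, dx_old)   # 0.000244140625 vs. 0.03125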
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/chombo/fields.py
--- a/yt/frontends/chombo/fields.py
+++ b/yt/frontends/chombo/fields.py
@@ -185,27 +185,10 @@
for alias in aliases:
self.alias((ptype, alias), (ptype, f), units = output_units)
- # We'll either have particle_position or particle_position_[xyz]
- if (ptype, "particle_position") in self.field_list or \
- (ptype, "particle_position") in self.field_aliases:
- particle_scalar_functions(ptype,
- "particle_position", "particle_velocity",
- self)
- else:
- # We need to check to make sure that there's a "known field" that
- # overlaps with one of the vector fields. For instance, if we are
- # in the Stream frontend, and we have a set of scalar position
- # fields, they will overlap with -- and be overridden by -- the
- # "known" vector field that the frontend creates. So the easiest
- # thing to do is to simply remove the on-disk field (which doesn't
- # exist) and replace it with a derived field.
- if (ptype, "particle_position") in self and \
- self[ptype, "particle_position"]._function == NullFunc:
- self.pop((ptype, "particle_position"))
- particle_vector_functions(ptype,
- ["particle_position_%s" % ax for ax in 'xyz'],
- ["particle_velocity_%s" % ax for ax in 'xyz'],
- self)
+ ppos_fields = ["particle_position_%s" % ax for ax in 'xyz']
+ pvel_fields = ["particle_velocity_%s" % ax for ax in 'xyz']
+ particle_vector_functions(ptype, ppos_fields, pvel_fields, self)
+
particle_deposition_functions(ptype, "particle_position",
"particle_mass", self)
standard_particle_fields(self, ptype)
@@ -219,7 +202,7 @@
self.add_output_field(field,
units = self.ds.field_units.get(field, ""),
particle_type = True)
- self.setup_smoothed_fields(ptype,
+ self.setup_smoothed_fields(ptype,
num_neighbors=num_neighbors,
ftype=ftype)
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/gadget/tests/test_outputs.py
--- a/yt/frontends/gadget/tests/test_outputs.py
+++ b/yt/frontends/gadget/tests/test_outputs.py
@@ -14,6 +14,8 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import OrderedDict
+
from yt.testing import requires_file
from yt.utilities.answer_testing.framework import \
data_dir_load, \
@@ -23,20 +25,25 @@
isothermal_h5 = "IsothermalCollapse/snap_505.hdf5"
isothermal_bin = "IsothermalCollapse/snap_505"
-gdg = "GadgetDiskGalaxy/snapshot_0200.hdf5"
+gdg = "GadgetDiskGalaxy/snapshot_200.hdf5"
-iso_fields = (
- ("gas", "density"),
- ("gas", "temperature"),
- ('gas', 'velocity_magnitude'),
- ("deposit", "all_density"),
- ("deposit", "all_count"),
- ("deposit", "all_cic"),
- ("deposit", "PartType0_density"),
+# This maps from field names to weight field names to use for projections
+iso_fields = OrderedDict(
+ [
+ (("gas", "density"), None),
+ (("gas", "temperature"), None),
+ (("gas", "temperature"), ('gas', 'density')),
+ (('gas', 'velocity_magnitude'), None),
+ (("deposit", "all_density"), None),
+ (("deposit", "all_count"), None),
+ (("deposit", "all_cic"), None),
+ (("deposit", "PartType0_density"), None),
+ ]
)
iso_kwargs = dict(bounding_box=[[-3, 3], [-3, 3], [-3, 3]])
-gdg_fields = iso_fields + (("deposit", "PartType4_density"), )
+gdg_fields = iso_fields.copy()
+gdg_fields["deposit", "PartType4_density"] = None
gdg_kwargs = dict(bounding_box=[[-1e5, 1e5], [-1e5, 1e5], [-1e5, 1e5]])
@@ -57,6 +64,6 @@
@requires_ds(gdg, big_data=True)
def test_gadget_disk_galaxy():
- for test in sph_answer(gdg, 'snap_505', 11907080, gdg_fields,
+ for test in sph_answer(gdg, 'snapshot_200', 11907080, gdg_fields,
ds_kwargs=gdg_kwargs):
yield test
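
The tuple-of-fields lists in these tests become OrderedDicts mapping each field to the weight field used for its projection test. One caveat when reading the diffs: a dict keeps only the last value for a repeated key, so listing ("gas", "temperature") twice leaves a single entry. A small sketch of the pattern:

    from collections import OrderedDict

    # Field -> projection weight field, as in the test diffs above
    fields = OrderedDict([
        (("gas", "density"), None),                    # unweighted
        (("gas", "temperature"), ("gas", "density")),  # density-weighted
    ])

    for field, weight_field in fields.items():
        print(field, "weighted by", weight_field)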
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/owls/tests/test_outputs.py
--- a/yt/frontends/owls/tests/test_outputs.py
+++ b/yt/frontends/owls/tests/test_outputs.py
@@ -14,6 +14,8 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
+from collections import OrderedDict
+
from yt.testing import \
requires_file
from yt.utilities.answer_testing.framework import \
@@ -24,17 +26,21 @@
os33 = "snapshot_033/snap_033.0.hdf5"
-_fields = (
- ("gas", "density"),
- ("gas", "temperature"),
- ('gas', 'He_p0_number_density'),
- ('gas', 'N_p1_number_density'),
- ('gas', 'velocity_magnitude'),
- ("deposit", "all_density"),
- ("deposit", "all_count"),
- ("deposit", "all_cic"),
- ("deposit", "PartType0_density"),
- ("deposit", "PartType4_density"))
+# This maps from field names to weight field names to use for projections
+_fields = OrderedDict(
+ [
+ (("gas", "density"), None),
+ (("gas", "temperature"), None),
+ (("gas", "temperature"), ("gas", "density")),
+ (('gas', 'He_p0_number_density'), None),
+ (('gas', 'velocity_magnitude'), None),
+ (("deposit", "all_density"), None),
+ (("deposit", "all_count"), None),
+ (("deposit", "all_cic"), None),
+ (("deposit", "PartType0_density"), None),
+ (("deposit", "PartType4_density"), None),
+ ]
+)
@requires_ds(os33, big_data=True)
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/stream/tests/test_outputs.py
--- a/yt/frontends/stream/tests/test_outputs.py
+++ b/yt/frontends/stream/tests/test_outputs.py
@@ -19,7 +19,7 @@
import unittest
from yt.testing import assert_raises
-from yt.utilities.answer_testing.framework import data_dir_load
+from yt.convenience import load
from yt.utilities.exceptions import YTOutputNotIdentified
class TestEmptyLoad(unittest.TestCase):
@@ -40,6 +40,6 @@
shutil.rmtree(self.tmpdir)
def test_load_empty_file(self):
- assert_raises(YTOutputNotIdentified, data_dir_load, "not_a_file")
- assert_raises(YTOutputNotIdentified, data_dir_load, "empty_file")
- assert_raises(YTOutputNotIdentified, data_dir_load, "empty_directory")
+ assert_raises(YTOutputNotIdentified, load, "not_a_file")
+ assert_raises(YTOutputNotIdentified, load, "empty_file")
+ assert_raises(YTOutputNotIdentified, load, "empty_directory")
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/tipsy/data_structures.py
--- a/yt/frontends/tipsy/data_structures.py
+++ b/yt/frontends/tipsy/data_structures.py
@@ -72,7 +72,7 @@
bounding_box=None,
units_override=None):
# Because Tipsy outputs don't have a fixed domain boundary, one can
- # specify a bounding box which effectively gives a domain_left_edge
+ # specify a bounding box which effectively gives a domain_left_edge
# and domain_right_edge
self.bounding_box = bounding_box
self.filter_bbox = (bounding_box is not None)
@@ -180,7 +180,7 @@
else:
self.domain_left_edge = None
self.domain_right_edge = None
- else:
+ else:
bbox = np.array(self.bounding_box, dtype="float64")
if bbox.shape == (2, 3):
bbox = bbox.transpose()
@@ -270,7 +270,7 @@
'''
This method automatically detects whether the tipsy file is big/little endian
and is not corrupt/invalid. It returns a tuple of (Valid, endianswap) where
- Valid is a boolean that is true if the file is a tipsy file, and endianswap is
+ Valid is a boolean that is true if the file is a tipsy file, and endianswap is
the endianness character '>' or '<'.
'''
try:
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/tipsy/io.py
--- a/yt/frontends/tipsy/io.py
+++ b/yt/frontends/tipsy/io.py
@@ -17,52 +17,53 @@
import glob
import numpy as np
+from numpy.lib.recfunctions import append_fields
import os
-from yt.geometry.oct_container import \
- _ORDER_MAX
from yt.utilities.io_handler import \
BaseIOHandler
from yt.utilities.lib.geometry_utils import \
compute_morton
from yt.utilities.logger import ytLogger as \
mylog
-
+
CHUNKSIZE = 10000000
+
class IOHandlerTipsyBinary(BaseIOHandler):
_dataset_type = "tipsy"
_vector_fields = ("Coordinates", "Velocity", "Velocities")
- _pdtypes = None # dtypes, to be filled in later
+ _pdtypes = None # dtypes, to be filled in later
+ _aux_pdtypes = None # auxiliary files' dtypes
- _ptypes = ( "Gas",
- "DarkMatter",
- "Stars" )
- _chunksize = 64*64*64
+ _ptypes = ("Gas",
+ "DarkMatter",
+ "Stars")
+ _chunksize = 64 * 64 * 64
_aux_fields = None
- _fields = ( ("Gas", "Mass"),
- ("Gas", "Coordinates"),
- ("Gas", "Velocities"),
- ("Gas", "Density"),
- ("Gas", "Temperature"),
- ("Gas", "Epsilon"),
- ("Gas", "Metals"),
- ("Gas", "Phi"),
- ("DarkMatter", "Mass"),
- ("DarkMatter", "Coordinates"),
- ("DarkMatter", "Velocities"),
- ("DarkMatter", "Epsilon"),
- ("DarkMatter", "Phi"),
- ("Stars", "Mass"),
- ("Stars", "Coordinates"),
- ("Stars", "Velocities"),
- ("Stars", "Metals"),
- ("Stars", "FormationTime"),
- ("Stars", "Epsilon"),
- ("Stars", "Phi")
- )
+ _fields = (("Gas", "Mass"),
+ ("Gas", "Coordinates"),
+ ("Gas", "Velocities"),
+ ("Gas", "Density"),
+ ("Gas", "Temperature"),
+ ("Gas", "Epsilon"),
+ ("Gas", "Metals"),
+ ("Gas", "Phi"),
+ ("DarkMatter", "Mass"),
+ ("DarkMatter", "Coordinates"),
+ ("DarkMatter", "Velocities"),
+ ("DarkMatter", "Epsilon"),
+ ("DarkMatter", "Phi"),
+ ("Stars", "Mass"),
+ ("Stars", "Coordinates"),
+ ("Stars", "Velocities"),
+ ("Stars", "Metals"),
+ ("Stars", "FormationTime"),
+ ("Stars", "Epsilon"),
+ ("Stars", "Phi")
+ )
def __init__(self, *args, **kwargs):
self._aux_fields = []
@@ -71,50 +72,6 @@
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
- def _read_aux_fields(self, field, mask, data_file):
- """
- Read in auxiliary files from gasoline/pkdgrav.
- This method will automatically detect the format of the file.
- """
- filename = data_file.filename+'.'+field
- dtype = None
- # We need to do some fairly ugly detection to see what format the auxiliary
- # files are in. They can be either ascii or binary, and the binary files can be
- # either floats, ints, or doubles. We're going to use a try-catch cascade to
- # determine the format.
- try:#ASCII
- auxdata = np.genfromtxt(filename, skip_header=1)
- if auxdata.size != np.sum(data_file.total_particles.values()):
- print("Error reading auxiliary tipsy file")
- raise RuntimeError
- except ValueError:#binary/xdr
- f = open(filename, 'rb')
- l = struct.unpack(data_file.ds.endian+"i", f.read(4))[0]
- if l != np.sum(data_file.total_particles.values()):
- print("Error reading auxiliary tipsy file")
- raise RuntimeError
- dtype = 'd'
- if field in ('iord', 'igasorder', 'grp'):#These fields are integers
- dtype = 'i'
- try:# If we try loading doubles by default, we can catch an exception and try floats next
- auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
- except struct.error:
- f.seek(4)
- dtype = 'f'
- try:
- auxdata = np.array(struct.unpack(data_file.ds.endian+(l*dtype), f.read()))
- except struct.error: # None of the binary attempts to read succeeded
- print("Error reading auxiliary tipsy file")
- raise RuntimeError
-
- # Use the mask to slice out the appropriate particle type data
- if mask.size == data_file.total_particles['Gas']:
- return auxdata[:data_file.total_particles['Gas']]
- elif mask.size == data_file.total_particles['DarkMatter']:
- return auxdata[data_file.total_particles['Gas']:-data_file.total_particles['DarkMatter']]
- else:
- return auxdata[-data_file.total_particles['Stars']:]
-
def _fill_fields(self, fields, vals, mask, data_file):
if mask is None:
size = 0
@@ -123,27 +80,26 @@
rv = {}
for field in fields:
mylog.debug("Allocating %s values for %s", size, field)
- if field in self._aux_fields: #Read each of the auxiliary fields
- rv[field] = self._read_aux_fields(field, mask, data_file)
- elif field in self._vector_fields:
+ if field in self._vector_fields:
rv[field] = np.empty((size, 3), dtype="float64")
- if size == 0: continue
- rv[field][:,0] = vals[field]['x'][mask]
- rv[field][:,1] = vals[field]['y'][mask]
- rv[field][:,2] = vals[field]['z'][mask]
+ if size == 0:
+ continue
+ rv[field][:, 0] = vals[field]['x'][mask]
+ rv[field][:, 1] = vals[field]['y'][mask]
+ rv[field][:, 2] = vals[field]['z'][mask]
else:
rv[field] = np.empty(size, dtype="float64")
- if size == 0: continue
+ if size == 0:
+ continue
rv[field][:] = vals[field][mask]
if field == "Coordinates":
eps = np.finfo(rv[field].dtype).eps
for i in range(3):
- rv[field][:,i] = np.clip(rv[field][:,i],
- self.domain_left_edge[i] + eps,
- self.domain_right_edge[i] - eps)
+ rv[field][:, i] = np.clip(rv[field][:, i],
+ self.domain_left_edge[i] + eps,
+ self.domain_right_edge[i] - eps)
return rv
-
def _read_particle_coords(self, chunks, ptf):
data_files = set([])
for chunk in chunks:
@@ -153,14 +109,16 @@
poff = data_file.field_offsets
tp = data_file.total_particles
f = open(data_file.filename, "rb")
- for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+ for ptype, field_list in sorted(ptf.items(),
+ key=lambda a: poff[a[0]]):
f.seek(poff[ptype], os.SEEK_SET)
total = 0
while total < tp[ptype]:
- p = np.fromfile(f, self._pdtypes[ptype],
- count=min(self._chunksize, tp[ptype] - total))
+ count = min(self._chunksize, tp[ptype] - total)
+ p = np.fromfile(f, self._pdtypes[ptype], count=count)
total += p.size
- d = [p["Coordinates"][ax].astype("float64") for ax in 'xyz']
+ d = [p["Coordinates"][ax].astype("float64")
+ for ax in 'xyz']
del p
yield ptype, d
@@ -172,24 +130,68 @@
data_files.update(obj.data_files)
for data_file in sorted(data_files):
poff = data_file.field_offsets
+ aux_fields_offsets = \
+ self._calculate_particle_offsets_aux(data_file)
tp = data_file.total_particles
f = open(data_file.filename, "rb")
- for ptype, field_list in sorted(ptf.items(), key=lambda a: poff[a[0]]):
+
+ # we need to open all aux files for chunking to work
+ aux_fh = {}
+ for afield in self._aux_fields:
+ aux_fh[afield] = open(data_file.filename + '.' + afield, 'rb')
+
+ for ptype, field_list in sorted(ptf.items(),
+ key=lambda a: poff[a[0]]):
f.seek(poff[ptype], os.SEEK_SET)
+ afields = list(set(field_list).intersection(self._aux_fields))
+ for afield in afields:
+ aux_fh[afield].seek(
+ aux_fields_offsets[afield][ptype][0], os.SEEK_SET)
+
total = 0
while total < tp[ptype]:
- p = np.fromfile(f, self._pdtypes[ptype],
- count=min(self._chunksize, tp[ptype] - total))
+ count = min(self._chunksize, tp[ptype] - total)
+ p = np.fromfile(f, self._pdtypes[ptype], count=count)
+
+ auxdata = []
+ for afield in afields:
+ if isinstance(self._aux_pdtypes[afield], np.dtype):
+ auxdata.append(
+ np.fromfile(aux_fh[afield],
+ self._aux_pdtypes[afield],
+ count=count)
+ )
+ else:
+ aux_fh[afield].seek(0, os.SEEK_SET)
+ sh = aux_fields_offsets[afield][ptype][0] + total
+ sf = aux_fields_offsets[afield][ptype][1] + \
+ tp[ptype] - count
+ if tp[ptype] > 0:
+ aux = np.genfromtxt(
+ aux_fh[afield], skip_header=sh,
+ skip_footer=sf
+ )
+ if aux.ndim < 1:
+ aux = np.array([aux])
+ auxdata.append(aux)
+
total += p.size
+ if afields:
+ p = append_fields(p, afields, auxdata)
mask = selector.select_points(
p["Coordinates"]['x'].astype("float64"),
p["Coordinates"]['y'].astype("float64"),
p["Coordinates"]['z'].astype("float64"), 0.0)
- if mask is None: continue
+ if mask is None:
+ continue
tf = self._fill_fields(field_list, p, mask, data_file)
for field in field_list:
yield (ptype, field), tf.pop(field)
+
+ # close all file handles
f.close()
+ for fh in list(aux_fh.values()):
+ fh.close()
def _update_domain(self, data_file):
'''
@@ -201,24 +203,25 @@
ind = 0
# Check to make sure that the domain hasn't already been set
# by the parameter file
- if np.all(np.isfinite(ds.domain_left_edge)) and np.all(np.isfinite(ds.domain_right_edge)):
+ if np.all(np.isfinite(ds.domain_left_edge)) and \
+ np.all(np.isfinite(ds.domain_right_edge)):
return
with open(data_file.filename, "rb") as f:
ds.domain_left_edge = 0
ds.domain_right_edge = 0
f.seek(ds._header_offset)
- mi = np.array([1e30, 1e30, 1e30], dtype="float64")
- ma = -np.array([1e30, 1e30, 1e30], dtype="float64")
+ mi = np.array([1e30, 1e30, 1e30], dtype="float64")
+ ma = -np.array([1e30, 1e30, 1e30], dtype="float64")
for iptype, ptype in enumerate(self._ptypes):
# We'll just add the individual types separately
count = data_file.total_particles[ptype]
- if count == 0: continue
- start, stop = ind, ind + count
+ if count == 0:
+ continue
+ stop = ind + count
while ind < stop:
c = min(CHUNKSIZE, stop - ind)
- pp = np.fromfile(f, dtype = self._pdtypes[ptype],
- count = c)
- eps = np.finfo(pp["Coordinates"]["x"].dtype).eps
+ pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+ count=c)
np.minimum(mi, [pp["Coordinates"]["x"].min(),
pp["Coordinates"]["y"].min(),
pp["Coordinates"]["z"].min()], mi)
@@ -234,7 +237,7 @@
ds.domain_right_edge = ds.arr(ma, 'code_length')
ds.domain_width = DW = ds.domain_right_edge - ds.domain_left_edge
ds.unit_registry.add("unitary", float(DW.max() * DW.units.base_value),
- DW.units.dimensions)
+ DW.units.dimensions)
def _initialize_index(self, data_file, regions):
ds = data_file.ds
@@ -242,7 +245,6 @@
dtype="uint64")
ind = 0
DLE, DRE = ds.domain_left_edge, ds.domain_right_edge
- dx = (DRE - DLE) / (2**_ORDER_MAX)
self.domain_left_edge = DLE.in_units("code_length").ndarray_view()
self.domain_right_edge = DRE.in_units("code_length").ndarray_view()
with open(data_file.filename, "rb") as f:
@@ -250,28 +252,29 @@
for iptype, ptype in enumerate(self._ptypes):
# We'll just add the individual types separately
count = data_file.total_particles[ptype]
- if count == 0: continue
- start, stop = ind, ind + count
+ if count == 0:
+ continue
+ stop = ind + count
while ind < stop:
c = min(CHUNKSIZE, stop - ind)
- pp = np.fromfile(f, dtype = self._pdtypes[ptype],
- count = c)
+ pp = np.fromfile(f, dtype=self._pdtypes[ptype],
+ count=c)
mis = np.empty(3, dtype="float64")
mas = np.empty(3, dtype="float64")
for axi, ax in enumerate('xyz'):
mi = pp["Coordinates"][ax].min()
ma = pp["Coordinates"][ax].max()
- mylog.debug("Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
+ mylog.debug(
+ "Spanning: %0.3e .. %0.3e in %s", mi, ma, ax)
mis[axi] = mi
mas[axi] = ma
pos = np.empty((pp.size, 3), dtype="float64")
for i, ax in enumerate("xyz"):
- eps = np.finfo(pp["Coordinates"][ax].dtype).eps
- pos[:,i] = pp["Coordinates"][ax]
+ pos[:, i] = pp["Coordinates"][ax]
regions.add_data_file(pos, data_file.file_id,
data_file.ds.filter_bbox)
- morton[ind:ind+c] = compute_morton(
- pos[:,0], pos[:,1], pos[:,2],
+ morton[ind:ind + c] = compute_morton(
+ pos[:, 0], pos[:, 1], pos[:, 2],
DLE, DRE, data_file.ds.filter_bbox)
ind += c
mylog.info("Adding %0.3e particles", morton.size)
@@ -286,7 +289,7 @@
return npart
@classmethod
- def _compute_dtypes(cls, field_dtypes, endian = "<"):
+ def _compute_dtypes(cls, field_dtypes, endian="<"):
pds = {}
for ptype, field in cls._fields:
dtbase = field_dtypes.get(field, 'f')
@@ -304,27 +307,50 @@
def _create_dtypes(self, data_file):
# We can just look at the particle counts.
self._header_offset = data_file.ds._header_offset
- self._pdtypes = {}
- pds = {}
- field_list = []
- tp = data_file.total_particles
- aux_filenames = glob.glob(data_file.filename+'.*') # Find out which auxiliaries we have
- self._aux_fields = [f[1+len(data_file.filename):] for f in aux_filenames]
self._pdtypes = self._compute_dtypes(data_file.ds._field_dtypes,
data_file.ds.endian)
+ self._field_list = []
for ptype, field in self._fields:
- if tp[ptype] == 0:
+ if data_file.total_particles[ptype] == 0:
# We do not want out _pdtypes to have empty particles.
self._pdtypes.pop(ptype, None)
continue
- field_list.append((ptype, field))
- if any(["Gas"==f[0] for f in field_list]): #Add the auxiliary fields to each ptype we have
- field_list += [("Gas",a) for a in self._aux_fields]
- if any(["DarkMatter"==f[0] for f in field_list]):
- field_list += [("DarkMatter",a) for a in self._aux_fields]
- if any(["Stars"==f[0] for f in field_list]):
- field_list += [("Stars",a) for a in self._aux_fields]
- self._field_list = field_list
+ self._field_list.append((ptype, field))
+
+ # Find out which auxiliaries we have and what is their format
+ tot_parts = np.sum(data_file.total_particles.values())
+ endian = data_file.ds.endian
+ self._aux_pdtypes = {}
+ self._aux_fields = [f.rsplit('.')[-1]
+ for f in glob.glob(data_file.filename + '.*')]
+ for afield in self._aux_fields:
+ filename = data_file.filename + '.' + afield
+ # We need to do some fairly ugly detection to see what format the
+ # auxiliary files are in. They can be either ascii or binary, and
+ # the binary files can be either floats, ints, or doubles. We're
+ # going to use a try-catch cascade to determine the format.
+ filesize = os.stat(filename).st_size
+ if np.fromfile(filename, np.dtype(endian + 'i4'),
+ count=1) != tot_parts:
+ with open(filename) as f:
+ if int(f.readline()) != tot_parts:
+ raise RuntimeError
+ self._aux_pdtypes[afield] = "ascii"
+ elif (filesize - 4) / 8 == tot_parts:
+ self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'd')])
+ elif (filesize - 4) / 4 == tot_parts:
+ if afield.startswith("i"):
+ self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'i')])
+ else:
+ self._aux_pdtypes[afield] = np.dtype([('aux', endian + 'f')])
+ else:
+ raise RuntimeError
+
+ # Add the auxiliary fields to each ptype we have
+ for ptype in self._ptypes:
+ if any([ptype == field[0] for field in self._field_list]):
+ self._field_list += \
+ [(ptype, afield) for afield in self._aux_fields]
return self._field_list
def _identify_fields(self, data_file):
@@ -335,7 +361,29 @@
pos = data_file.ds._header_offset
for ptype in self._ptypes:
field_offsets[ptype] = pos
- if data_file.total_particles[ptype] == 0: continue
+ if data_file.total_particles[ptype] == 0:
+ continue
size = self._pdtypes[ptype].itemsize
pos += data_file.total_particles[ptype] * size
return field_offsets
+
+ def _calculate_particle_offsets_aux(self, data_file):
+ aux_fields_offsets = {}
+ tp = data_file.total_particles
+ for afield in self._aux_fields:
+ aux_fields_offsets[afield] = {}
+ if isinstance(self._aux_pdtypes[afield], np.dtype):
+ pos = 4 # i4
+ for ptype in self._ptypes:
+ aux_fields_offsets[afield][ptype] = (pos, 0)
+ if data_file.total_particles[ptype] == 0:
+ continue
+ size = np.dtype(self._aux_pdtypes[afield]).itemsize
+ pos += data_file.total_particles[ptype] * size
+ else:
+ aux_fields_offsets[afield].update(
+ {'Gas': (1, tp["DarkMatter"] + tp["Stars"]),
+ 'DarkMatter': (1 + tp["Gas"], tp["Stars"]),
+ 'Stars': (1 + tp["DarkMatter"] + tp["Gas"], 0)}
+ )
+ return aux_fields_offsets
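
The rewritten tipsy reader replaces the old try/except cascade in _read_aux_fields with up-front format detection in _create_dtypes: a binary auxiliary file opens with an i4 particle count, followed by 8-byte doubles or 4-byte ints/floats depending on file size, while an ASCII file begins with the count as a text line. A standalone sketch of that detection follows; detect_aux_format is a hypothetical helper written for illustration, not a yt method:

    import os
    import numpy as np

    def detect_aux_format(filename, afield, tot_parts, endian="<"):
        """Guess the layout of a gasoline/pkdgrav auxiliary file.

        Follows the detection order in the diff above; a sketch only.
        """
        filesize = os.stat(filename).st_size
        header = np.fromfile(filename, np.dtype(endian + "i4"), count=1)
        if header.size == 0 or header[0] != tot_parts:
            # No binary header: expect ASCII with the particle count
            # as the first line.
            with open(filename) as f:
                if int(f.readline()) != tot_parts:
                    raise RuntimeError("unrecognized aux file: %s" % filename)
            return "ascii"
        if (filesize - 4) / 8 == tot_parts:
            return np.dtype([("aux", endian + "d")])   # 8-byte doubles
        if (filesize - 4) / 4 == tot_parts:
            # Integer fields (iord, igasorder, ...) start with "i"
            kind = "i" if afield.startswith("i") else "f"
            return np.dtype([("aux", endian + kind)])
        raise RuntimeError("unrecognized aux file: %s" % filename)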
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/frontends/tipsy/tests/test_outputs.py
--- a/yt/frontends/tipsy/tests/test_outputs.py
+++ b/yt/frontends/tipsy/tests/test_outputs.py
@@ -14,7 +14,11 @@
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
-from yt.testing import assert_equal, requires_file
+from collections import OrderedDict
+
+from yt.testing import \
+ assert_equal, \
+ requires_file
from yt.utilities.answer_testing.framework import \
requires_ds, \
data_dir_load, \
@@ -92,12 +96,15 @@
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
-tg_fields = (
- ('gas', 'density'),
- ('gas', 'temperature'),
- ('gas', 'velocity_magnitude'),
- ('gas', 'Fe_fraction'),
- ('Stars', 'Metals'),
+tg_fields = OrderedDict(
+ [
+ (('gas', 'density'), None),
+ (('gas', 'temperature'), None),
+ (('gas', 'temperature'), ('gas', 'density')),
+ (('gas', 'velocity_magnitude'), None),
+ (('gas', 'Fe_fraction'), None),
+ (('Stars', 'Metals'), None),
+ ]
)
tipsy_gal = 'TipsyGalaxy/galaxy.00300'
@@ -105,7 +112,7 @@
def test_tipsy_galaxy():
for test in sph_answer(tipsy_gal, 'galaxy.00300', 315372, tg_fields):
yield test
-
+
@requires_file(gasoline_dmonly)
@requires_file(pkdgrav)
def test_TipsyDataset():
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/units/setup.py
--- a/yt/units/setup.py
+++ b/yt/units/setup.py
@@ -1,8 +1,4 @@
#!/usr/bin/env python
-import setuptools
-import os
-import sys
-import os.path
def configuration(parent_package='', top_path=None):
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/units/tests/test_units.py
--- a/yt/units/tests/test_units.py
+++ b/yt/units/tests/test_units.py
@@ -225,7 +225,7 @@
"""
try:
- u1 = Unit(Symbol("jigawatts"))
+ Unit(Symbol("jigawatts"))
except UnitParseError:
yield assert_true, True
else:
@@ -237,7 +237,7 @@
"""
try:
- u1 = Unit([1]) # something other than Expr and str
+ Unit([1]) # something other than Expr and str
except UnitParseError:
yield assert_true, True
else:
@@ -249,7 +249,7 @@
"""
try:
- u1 = Unit("a", base_value=1, dimensions="(mass)")
+ Unit("a", base_value=1, dimensions="(mass)")
except UnitParseError:
yield assert_true, True
else:
@@ -264,7 +264,7 @@
a = Symbol("a")
try:
- u1 = Unit("a", base_value=1, dimensions=a)
+ Unit("a", base_value=1, dimensions=a)
except UnitParseError:
pass
else:
@@ -277,7 +277,7 @@
"""
try:
- u1 = Unit("a", base_value="a", dimensions=(mass/time))
+ Unit("a", base_value="a", dimensions=(mass/time))
except UnitParseError:
yield assert_true, True
else:
@@ -454,7 +454,7 @@
assert_raises(InvalidUnitOperation, operator.mul, u1, u2)
assert_raises(InvalidUnitOperation, operator.truediv, u1, u2)
-def test_comoving_labels():
+def test_comoving_and_code_unit_labels():
ds = fake_random_ds(64, nprocs=1)
# create a fake comoving unit
@@ -464,3 +464,10 @@
test_unit = Unit('Mpccm', registry=ds.unit_registry)
assert_almost_equal(test_unit.base_value, cm_per_mpc/3)
assert_equal(test_unit.latex_repr, r'\rm{Mpc}/(1+z)')
+
+ test_unit = Unit('code_mass', registry=ds.unit_registry)
+ assert_equal(test_unit.latex_repr, '\\rm{code\\ mass}')
+
+ test_unit = Unit('code_mass/code_length**3', registry=ds.unit_registry)
+ assert_equal(test_unit.latex_repr,
+ '\\frac{\\rm{code\\ mass}}{\\rm{code\\ length}^{3}}')
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/units/unit_object.py
--- a/yt/units/unit_object.py
+++ b/yt/units/unit_object.py
@@ -18,7 +18,7 @@
Pow, Symbol, Integer, \
Float, Basic, Rational, sqrt
from sympy.core.numbers import One
-from sympy import sympify, latex, symbols
+from sympy import sympify, latex
from sympy.parsing.sympy_parser import \
parse_expr, auto_number, rationalize
from keyword import iskeyword
@@ -32,7 +32,6 @@
from yt.utilities.exceptions import YTUnitsNotReducible
import copy
-import string
import token
class UnitParseError(Exception):
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/units/unit_registry.py
--- a/yt/units/unit_registry.py
+++ b/yt/units/unit_registry.py
@@ -63,7 +63,7 @@
if tex_repr is None:
# make educated guess that will look nice in most cases
- tex_repr = r"\rm{" + symbol + "}"
+ tex_repr = r"\rm{" + symbol.replace('_', '\ ') + "}"
# Add to lut
self.lut.update({symbol: (base_value, dimensions, offset, tex_repr)})
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/units/yt_array.py
--- a/yt/units/yt_array.py
+++ b/yt/units/yt_array.py
@@ -40,7 +40,6 @@
from yt.utilities.on_demand_imports import _astropy
from sympy import Rational
from yt.units.unit_lookup_table import \
- unit_prefixes, prefixable_units, \
default_unit_symbol_lut
from yt.units.equivalencies import equivalence_registry
from yt.utilities.logger import ytLogger as mylog
@@ -630,12 +629,12 @@
# Converting from AstroPy Quantity
u = arr.unit
ap_units = []
- for base, power in zip(u.bases, u.powers):
+ for base, exponent in zip(u.bases, u.powers):
unit_str = base.to_string()
# we have to do this because AstroPy is silly and defines
# hour as "h"
if unit_str == "h": unit_str = "hr"
- ap_units.append("%s**(%s)" % (unit_str, Rational(power)))
+ ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
ap_units = "*".join(ap_units)
if isinstance(arr.value, np.ndarray):
return YTArray(arr.value, ap_units, registry=unit_registry)
@@ -675,9 +674,9 @@
>>> c = yt.YTArray.from_pint(b)
"""
p_units = []
- for base, power in arr.units.items():
+ for base, exponent in arr.units.items():
bs = convert_pint_units(base)
- p_units.append("%s**(%s)" % (bs, Rational(power)))
+ p_units.append("%s**(%s)" % (bs, Rational(exponent)))
p_units = "*".join(p_units)
if isinstance(arr.magnitude, np.ndarray):
return YTArray(arr.magnitude, p_units, registry=unit_registry)
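
In the AstroPy and pint conversion helpers, this loop rebuilds the unit expression as a string of base**exponent factors before handing it to YTArray. A sketch of what the renamed loop produces, using stand-in lists rather than a real AstroPy unit object:

    from sympy import Rational

    # Stand-ins for u.bases and u.powers of an AstroPy unit like km / s
    bases, powers = ["km", "s"], [1, -1]

    ap_units = []
    for base, exponent in zip(bases, powers):
        unit_str = base
        if unit_str == "h":   # AstroPy abbreviates hour as "h"; yt wants "hr"
            unit_str = "hr"
        ap_units.append("%s**(%s)" % (unit_str, Rational(exponent)))
    print("*".join(ap_units))   # km**(1)*s**(-1)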
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/utilities/answer_testing/framework.py
--- a/yt/utilities/answer_testing/framework.py
+++ b/yt/utilities/answer_testing/framework.py
@@ -805,7 +805,8 @@
ds_fn, field, dobj_name)
def big_patch_amr(ds_fn, fields, input_center="max", input_weight="density"):
- if not can_run_ds(ds_fn): return
+ if not can_run_ds(ds_fn):
+ return
dso = [ None, ("sphere", (input_center, (0.1, 'unitary')))]
yield GridHierarchyTest(ds_fn)
yield ParentageRelationshipsTest(ds_fn)
@@ -837,17 +838,16 @@
s1 = dobj["ones"].sum()
s2 = sum(mask.sum() for block, mask in dobj.blocks)
yield assert_equal, s1, s2
- for field in fields:
+ for field, weight_field in fields.items():
if field[0] in ds.particle_types:
particle_type = True
else:
particle_type = False
for axis in [0, 1, 2]:
- for weight_field in [None, ('gas', 'density')]:
- if particle_type is False:
- yield PixelizedProjectionValuesTest(
- ds_fn, axis, field, weight_field,
- dobj_name)
+ if particle_type is False:
+ yield PixelizedProjectionValuesTest(
+ ds_fn, axis, field, weight_field,
+ dobj_name)
yield FieldValuesTest(ds_fn, field, dobj_name,
particle_type=particle_type)
return
diff -r 87cf0225db27b53dd2cd4fb6b9962176ae5b06f5 -r b7afd79a9820e8605a1e73c5d3d7a077cae7fe96 yt/visualization/plot_container.py
--- a/yt/visualization/plot_container.py
+++ b/yt/visualization/plot_container.py
@@ -52,6 +52,7 @@
args[0].plots[field].figure = None
args[0].plots[field].axes = None
args[0].plots[field].cax = None
+ args[0]._setup_plots()
return rv
return newfunc
Repository URL: https://bitbucket.org/yt_analysis/yt/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.