[yt-svn] commit/yt: 5 new changesets
commits-noreply at bitbucket.org
Fri Jan 19 23:04:48 PST 2018
5 new commits in yt:
https://bitbucket.org/yt_analysis/yt/commits/41e06797db1d/
Changeset: 41e06797db1d
User: C0nsultant
Date: 2017-12-08 11:47:41+00:00
Summary: Harden file handling.
fileBased and groupBased files are now loaded according to their iteration encoding.
Previously, groupBased files were handled as fileBased files with only the first iteration considered.
If a file does not contain the paths indicated by `meshesPath` or `particlesPath`,
it is assumed that the file contains no meshes or particles at all.
Test cases were added for both variants.
Affected #: 5 files
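In practice the hardening means both degenerate file layouts now load cleanly. A minimal sketch, assuming the no_fields/data00000400.h5 and no_particles/data00000400.h5 samples exercised by the new tests, and that yt.load resolves them to the openPMD frontend:

    import yt

    # meshesPath is absent on disk: the frontend assumes the file contains
    # no meshes at all and falls back to a domain extent of 1m^3
    ds = yt.load("no_fields/data00000400.h5")
    print(ds.domain_width)  # ~1 m along each axis
    print(ds.field_list)    # particle fields only

    # particlesPath is absent on disk: only mesh fields are detected
    ds = yt.load("no_particles/data00000400.h5")
    print(ds.field_list)    # ('openPMD', ...) mesh fields only
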
diff -r 1d7e99ee5fdcdee5ac15cb7012fe9f467f09c2ec -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 yt/frontends/open_pmd/data_structures.py
--- a/yt/frontends/open_pmd/data_structures.py
+++ b/yt/frontends/open_pmd/data_structures.py
@@ -25,6 +25,7 @@
import numpy as np
from yt.data_objects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
+from yt.data_objects.time_series import DatasetSeries
from yt.frontends.open_pmd.fields import OpenPMDFieldInfo
from yt.frontends.open_pmd.misc import \
is_const_component, \
@@ -36,6 +37,10 @@
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5
+ompd_known_versions = [StrictVersion("1.0.0"),
+ StrictVersion("1.0.1")]
+opmd_required_attributes = ["openPMD", "basePath", "meshesPath", "particlesPath"]
+
class OpenPMDGrid(AMRGridPatch):
"""Represents chunk of data on-disk.
@@ -47,8 +52,8 @@
_id_offset = 0
__slots__ = ["_level_id"]
# Every particle species and mesh might have different hdf5-indices and offsets
- ftypes=[]
- ptypes=[]
+ ftypes = []
+ ptypes = []
findex = 0
foffset = 0
pindex = 0
@@ -98,17 +103,22 @@
f = self.dataset._handle
bp = self.dataset.base_path
pp = self.dataset.particles_path
- for ptype in self.ds.particle_types_raw:
- if str(ptype) == "io":
- spec = list(f[bp + pp].keys())[0]
- else:
- spec = ptype
- axis = list(f[bp + pp + "/" + spec + "/position"].keys())[0]
- pos = f[bp + pp + "/" + spec + "/position/" + axis]
- if is_const_component(pos):
- result[ptype] = pos.attrs["shape"]
- else:
- result[ptype] = pos.len()
+
+ try:
+ for ptype in self.ds.particle_types_raw:
+ if str(ptype) == "io":
+ spec = list(f[bp + pp].keys())[0]
+ else:
+ spec = ptype
+ axis = list(f[bp + pp + "/" + spec + "/position"].keys())[0]
+ pos = f[bp + pp + "/" + spec + "/position/" + axis]
+ if is_const_component(pos):
+ result[ptype] = pos.attrs["shape"]
+ else:
+ result[ptype] = pos.len()
+ except(KeyError):
+ result["io"] = 0
+
return result
def _detect_output_fields(self):
@@ -127,46 +137,50 @@
mesh_fields = []
try:
- for field in f[bp + mp].keys():
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
try:
- for axis in f[bp + mp + field].keys():
- mesh_fields.append(field.replace("_", "-")
+ mesh = meshes[mname]
+ for axis in mesh.keys():
+ mesh_fields.append(mname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# This is a h5.Dataset (i.e. no axes)
- mesh_fields.append(field.replace("_", "-"))
- except KeyError:
- # There are no mesh fields
+ mesh_fields.append(mname.replace("_", "-"))
+ except(KeyError):
pass
self.field_list = [("openPMD", str(field)) for field in mesh_fields]
particle_fields = []
try:
- for species in f[bp + pp].keys():
- for record in f[bp + pp + species].keys():
- if is_const_component(f[bp + pp + species + "/" + record]):
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ for recname in species.keys():
+ record = species[recname]
+ if is_const_component(record):
# Record itself (e.g. particle_mass) is constant
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-"))
- elif "particlePatches" not in record:
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-"))
+ elif "particlePatches" not in recname:
try:
# Create a field for every axis (x,y,z) of every property (position)
# of every species (electrons)
- axes = list(f[bp + pp + species + "/" + record].keys())
- if str(record) == "position":
- record = "positionCoarse"
+ axes = list(record.keys())
+ if str(recname) == "position":
+ recname = "positionCoarse"
for axis in axes:
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-")
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# Record is a dataset, does not have axes (e.g. weighting)
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-"))
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-"))
pass
else:
pass
- if len(list(f[bp + pp].keys())) > 1:
+ if len(list(particles.keys())) > 1:
# There is more than one particle species, use the specific names as field types
self.field_list.extend(
[(str(field).split("_")[0],
@@ -176,8 +190,7 @@
self.field_list.extend(
[("io",
("particle_" + "_".join(str(field).split("_")[1:]))) for field in particle_fields])
- except KeyError:
- # There are no particle fields
+ except(KeyError):
pass
def _count_grids(self):
@@ -195,25 +208,36 @@
self.num_grids = 0
- for mesh in f[bp + mp].keys():
- if type(f[bp + mp + mesh]) is h5.Group:
- shape = f[bp + mp + mesh + "/" + list(f[bp + mp + mesh].keys())[0]].shape
- else:
- shape = f[bp + mp + mesh].shape
- spacing = tuple(f[bp + mp + mesh].attrs["gridSpacing"])
- offset = tuple(f[bp + mp + mesh].attrs["gridGlobalOffset"])
- unit_si = f[bp + mp + mesh].attrs["gridUnitSI"]
- self.meshshapes[mesh] = (shape, spacing, offset, unit_si)
- for species in f[bp + pp].keys():
- if "particlePatches" in f[bp + pp + "/" + species].keys():
- for (patch, size) in enumerate(f[bp + pp + "/" + species + "/particlePatches/numParticles"]):
- self.numparts[species + "#" + str(patch)] = size
- else:
- axis = list(f[bp + pp + species + "/position"].keys())[0]
- if is_const_component(f[bp + pp + species + "/position/" + axis]):
- self.numparts[species] = f[bp + pp + species + "/position/" + axis].attrs["shape"]
+ try:
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
+ mesh = meshes[mname]
+ if type(mesh) is h5.Group:
+ shape = mesh[list(mesh.keys())[0]].shape
else:
- self.numparts[species] = f[bp + pp + species + "/position/" + axis].len()
+ shape = mesh.shape
+ spacing = tuple(mesh.attrs["gridSpacing"])
+ offset = tuple(mesh.attrs["gridGlobalOffset"])
+ unit_si = mesh.attrs["gridUnitSI"]
+ self.meshshapes[mname] = (shape, spacing, offset, unit_si)
+ except(KeyError):
+ pass
+
+ try:
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ if "particlePatches" in species.keys():
+ for (patch, size) in enumerate(species["/particlePatches/numParticles"]):
+ self.numparts[pname + "#" + str(patch)] = size
+ else:
+ axis = list(species["/position"].keys())[0]
+ if is_const_component(species["/position/" + axis]):
+ self.numparts[pname] = species["/position/" + axis].attrs["shape"]
+ else:
+ self.numparts[pname] = species["/position/" + axis].len()
+ except(KeyError):
+ pass
# Limit values per grid by resulting memory footprint
self.vpg = int(self.dataset.gridsize / 4) # 4Byte per value (f32)
@@ -231,9 +255,9 @@
else:
no_patches[k] = v
for size in set(no_patches.values()):
- self.num_grids += int(np.ceil(size * self.vpg ** -1))
+ self.num_grids += int(np.ceil(size * self.vpg**-1))
for size in patches.values():
- self.num_grids += int(np.ceil(size * self.vpg ** -1))
+ self.num_grids += int(np.ceil(size * self.vpg**-1))
def _parse_index(self):
"""Fills each grid with appropriate properties (extent, dimensions, ...)
@@ -266,13 +290,13 @@
domain_dimension = np.asarray(shape, dtype=np.int32)
domain_dimension = np.append(domain_dimension, np.ones(3 - len(domain_dimension)))
# Number of grids of this shape
- num_grids = min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg ** -1)))
+ num_grids = min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg**-1)))
gle = offset * unit_si # self.dataset.domain_left_edge
gre = domain_dimension[:spacing.size] * unit_si * spacing + gle # self.dataset.domain_right_edge
gle = np.append(gle, np.zeros(3 - len(gle)))
gre = np.append(gre, np.ones(3 - len(gre)))
grid_dim_offset = np.linspace(0, domain_dimension[0], num_grids + 1, dtype=np.int32)
- grid_edge_offset = grid_dim_offset * np.float(domain_dimension[0]) ** -1 * (gre[0] - gle[0]) + gle[0]
+ grid_edge_offset = grid_dim_offset * np.float(domain_dimension[0])**-1 * (gre[0] - gle[0]) + gle[0]
mesh_names = []
for (mname, mdata) in self.meshshapes.items():
if mesh == mdata:
@@ -301,7 +325,10 @@
# This is a particlePatch
spec = species.split("#")
patch = f[bp + pp + "/" + spec[0] + "/particlePatches"]
- num_grids = int(np.ceil(count * self.vpg ** -1))
+ domain_dimension = np.ones(3, dtype=np.int32)
+ for (ind, axis) in enumerate(list(patch["extent"].keys())):
+ domain_dimension[ind] = patch["extent/" + axis].value[int(spec[1])]
+ num_grids = int(np.ceil(count * self.vpg**-1))
gle = []
for axis in patch["offset"].keys():
gle.append(get_component(patch, "offset/" + axis, int(spec[1]), 1)[0])
@@ -318,7 +345,8 @@
dtype=np.int32)
particle_names = [str(spec[0])]
elif str(species) not in handled_ptypes:
- num_grids = int(np.ceil(count * self.vpg ** -1))
+ domain_dimension = self.dataset.domain_dimensions
+ num_grids = int(np.ceil(count * self.vpg**-1))
gle = self.dataset.domain_left_edge
gre = self.dataset.domain_right_edge
particle_count = np.linspace(0, count, num_grids + 1, dtype=np.int32)
@@ -332,7 +360,7 @@
# A grid with this exact particle count has already been created
continue
for grid in np.arange(num_grids):
- self.grid_dimensions[grid_index_total] = [0, 0, 0] # Counted as mesh-size, thus no dimensional extent
+ self.grid_dimensions[grid_index_total] = domain_dimension
self.grid_left_edge[grid_index_total] = gle
self.grid_right_edge[grid_index_total] = gre
self.grid_particle_count[grid_index_total] = (particle_count[grid + 1] - particle_count[grid]) * len(
@@ -376,7 +404,9 @@
**kwargs):
self._handle = HDF5FileHandler(filename)
self.gridsize = kwargs.pop("open_pmd_virtual_gridsize", 10**9)
- self._set_paths(self._handle, path.dirname(filename))
+ self.standard_version = StrictVersion(self._handle.attrs["openPMD"].decode())
+ self.iteration = kwargs.pop("iteration", None)
+ self._set_paths(self._handle, path.dirname(filename), self.iteration)
Dataset.__init__(self,
filename,
dataset_type,
@@ -384,15 +414,18 @@
unit_system=unit_system)
self.storage_filename = storage_filename
self.fluid_types += ("openPMD",)
- particles = tuple(str(c) for c in self._handle[self.base_path + self.particles_path].keys())
- if len(particles) > 1:
- # Only use on-disk particle names if there is more than one species
- self.particle_types = particles
- mylog.debug("open_pmd - self.particle_types: {}".format(self.particle_types))
- self.particle_types_raw = self.particle_types
- self.particle_types = tuple(self.particle_types)
+ try:
+ particles = tuple(str(c) for c in self._handle[self.base_path + self.particles_path].keys())
+ if len(particles) > 1:
+ # Only use on-disk particle names if there is more than one species
+ self.particle_types = particles
+ mylog.debug("self.particle_types: {}".format(self.particle_types))
+ self.particle_types_raw = self.particle_types
+ self.particle_types = tuple(self.particle_types)
+ except(KeyError):
+ pass
- def _set_paths(self, handle, path):
+ def _set_paths(self, handle, path, iteration):
"""Parses relevant hdf5-paths out of ``handle``.
Parameters
@@ -402,29 +435,47 @@
(absolute) filepath for current hdf5 container
"""
iterations = []
+ if iteration is None:
+ iteration = list(handle["/data"].keys())[0]
encoding = handle.attrs["iterationEncoding"].decode()
if "groupBased" in encoding:
iterations = list(handle["/data"].keys())
- mylog.info("open_pmd - found {} iterations in file".format(len(iterations)))
+ mylog.info("Found {} iterations in file".format(len(iterations)))
elif "fileBased" in encoding:
itformat = handle.attrs["iterationFormat"].decode().split("/")[-1]
regex = "^" + itformat.replace("%T", "[0-9]+") + "$"
if path is "":
- mylog.warning("open_pmd - For file based iterations, please use absolute file paths!")
+ mylog.warning("For file based iterations, please use absolute file paths!")
pass
for filename in listdir(path):
if match(regex, filename):
iterations.append(filename)
- mylog.info("open_pmd - found {} iterations in directory".format(len(iterations)))
+ mylog.info("Found {} iterations in directory".format(len(iterations)))
if len(iterations) == 0:
- mylog.warning("open_pmd - no iterations found!")
+ mylog.warning("No iterations found!")
if "groupBased" in encoding and len(iterations) > 1:
- mylog.warning("open_pmd - only choose to load one iteration ({})".format(list(handle["/data"].keys())[0]))
+ mylog.warning("Only chose to load one iteration ({})".format(iteration))
- self.base_path = "/data/{}/".format(list(handle["/data"].keys())[0])
+ self.base_path = "/data/{}/".format(iteration)
self.meshes_path = self._handle["/"].attrs["meshesPath"].decode()
+ try:
+ meshes = handle[self.base_path + self.meshes_path]
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.0.1"):
+ mylog.info("meshesPath not present in file."
+ " Assuming file contains no meshes and has a domain extent of 1m^3!")
+ else:
+ raise
self.particles_path = self._handle["/"].attrs["particlesPath"].decode()
+ try:
+ particles = handle[self.base_path + self.particles_path]
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.0.1"):
+ mylog.info("particlesPath not present in file."
+ " Assuming file contains no particles!")
+ else:
+ raise
def _set_code_unit_attributes(self):
"""Handle conversion between different physical units and the code units.
@@ -455,19 +506,21 @@
shapes = {}
left_edges = {}
right_edges = {}
- for mesh in f[bp + mp].keys():
- if type(f[bp + mp + mesh]) is h5.Group:
- shape = np.asarray(f[bp + mp + mesh + "/" + list(f[bp + mp + mesh].keys())[0]].shape)
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
+ mesh = meshes[mname]
+ if type(mesh) is h5.Group:
+ shape = np.asarray(mesh[list(mesh.keys())[0]].shape)
else:
- shapes[mesh] = np.asarray(f[bp + mp + mesh].shape)
- spacing = np.asarray(f[bp + mp + mesh].attrs["gridSpacing"])
- offset = np.asarray(f[bp + mp + mesh].attrs["gridGlobalOffset"])
- unit_si = np.asarray(f[bp + mp + mesh].attrs["gridUnitSI"])
+ shape = np.asarray(mesh.shape)
+ spacing = np.asarray(mesh.attrs["gridSpacing"])
+ offset = np.asarray(mesh.attrs["gridGlobalOffset"])
+ unit_si = np.asarray(mesh.attrs["gridUnitSI"])
le = offset * unit_si
re = le + shape * unit_si * spacing
- shapes[mesh] = shape
- left_edges[mesh] = le
- right_edges[mesh] = re
+ shapes[mname] = shape
+ left_edges[mname] = le
+ right_edges[mname] = re
lowest_dim = np.min([len(i) for i in shapes.values()])
shapes = np.asarray([i[:lowest_dim] for i in shapes.values()])
left_edges = np.asarray([i[:lowest_dim] for i in left_edges.values()])
@@ -483,12 +536,14 @@
self.domain_dimensions = np.append(fs, np.ones(3 - self.dimensionality))
self.domain_left_edge = np.append(dle, np.zeros(3 - len(dle)))
self.domain_right_edge = np.append(dre, np.ones(3 - len(dre)))
- except ValueError:
- mylog.warning("open_pmd - It seems your data does not contain meshes. Assuming domain extent of 1m^3!")
- self.dimensionality = 3
- self.domain_dimensions = np.ones(3, dtype=np.float64)
- self.domain_left_edge = np.zeros(3, dtype=np.float64)
- self.domain_right_edge = np.ones(3, dtype=np.float64)
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.0.1"):
+ self.dimensionality = 3
+ self.domain_dimensions = np.ones(3, dtype=np.float64)
+ self.domain_left_edge = np.zeros(3, dtype=np.float64)
+ self.domain_right_edge = np.ones(3, dtype=np.float64)
+ else:
+ raise
self.current_time = f[bp].attrs["time"] * f[bp].attrs["timeUnitSI"]
@@ -502,18 +557,85 @@
except (IOError, OSError, ImportError):
return False
- requirements = ["openPMD", "basePath", "meshesPath", "particlesPath"]
attrs = list(f["/"].attrs.keys())
- for i in requirements:
+ for i in opmd_required_attributes:
if i not in attrs:
f.close()
return False
- known_versions = [StrictVersion("1.0.0"),
- StrictVersion("1.0.1")]
- if StrictVersion(f.attrs["openPMD"].decode()) in known_versions:
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
+ f.close()
+ return False
+
+ if f.attrs["iterationEncoding"].decode() == "fileBased":
f.close()
return True
+
+ f.close()
+ return False
+
+
+class OpenPMDDatasetSeries(DatasetSeries):
+ _pre_outputs = ()
+ _dataset_cls = OpenPMDDataset
+ parallel = True
+ setup_function = None
+ mixed_dataset_types = False
+
+ def __init__(self, filename):
+ super(OpenPMDDatasetSeries, self).__init__([])
+ self.handle = h5.File(filename, "r")
+ self.filename = filename
+ self._pre_outputs = sorted(np.asarray(list(self.handle["/data"].keys()), dtype=np.int))
+
+ def __iter__(self):
+ for it in self._pre_outputs:
+ ds = self._load(it, **self.kwargs)
+ self._setup_function(ds)
+ yield ds
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ o = self._load(key)
+ self._setup_function(o)
+ return o
else:
+ raise KeyError("Unknown iteration {}".format(key))
+
+ def _load(self, it, **kwargs):
+ return OpenPMDDataset(self.filename, iteration=it)
+
+
+class OpenPMDGroupBasedDataset(Dataset):
+ _index_class = OpenPMDHierarchy
+ _field_info_class = OpenPMDFieldInfo
+
+ def __new__(cls, *args, **kwargs):
+ ret = object.__new__(OpenPMDDatasetSeries)
+ ret.__init__(args[0])
+ return ret
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ warn_h5py(args[0])
+ try:
+ f = h5.File(args[0], "r")
+ except (IOError, OSError, ImportError):
+ return False
+
+ attrs = list(f["/"].attrs.keys())
+ for i in opmd_required_attributes:
+ if i not in attrs:
+ f.close()
+ return False
+
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
f.close()
return False
+
+ if f.attrs["iterationEncoding"].decode() == "groupBased":
+ f.close()
+ return True
+
+ f.close()
+ return False
diff -r 1d7e99ee5fdcdee5ac15cb7012fe9f467f09c2ec -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 yt/frontends/open_pmd/fields.py
--- a/yt/frontends/open_pmd/fields.py
+++ b/yt/frontends/open_pmd/fields.py
@@ -98,39 +98,36 @@
class OpenPMDFieldInfo(FieldInfoContainer):
"""Specifies which fields from the dataset yt should know about.
- ``self.known_other_fields`` and ``self.known_particle_fields`` must be
- populated. Entries for both of these lists must be tuples of the form
- ``("name", ("units", ["fields", "to", "alias"], "display_name"))``. These
- fields will be represented and handled in yt in the way you define them
- here. The fields defined in both ``self.known_other_fields`` and
- ``self.known_particle_fields`` will only be added to a dataset (with units,
- aliases, etc), if they match any entry in the ``OpenPMDHierarchy``'s
- ``self.field_list``.
+ ``self.known_other_fields`` and ``self.known_particle_fields`` must be populated.
+ Entries for both of these lists must be tuples of the form
+ ("name", ("units", ["fields", "to", "alias"], "display_name"))
+ These fields will be represented and handled in yt in the way you define them here.
+ The fields defined in both ``self.known_other_fields`` and ``self.known_particle_fields`` will only be added
+ to a dataset (with units, aliases, etc), if they match any entry in the ``OpenPMDHierarchy``'s ``self.field_list``.
Notes
-----
- Contrary to many other frontends, we dynamically obtain the known fields
- from the simulation output. The openPMD markup is extremely flexible -
- names, dimensions and the number of individual datasets can (and very likely
- will) vary.
+ Contrary to many other frontends, we dynamically obtain the known fields from the simulation output.
+ The openPMD markup is extremely flexible - names, dimensions and the number of individual datasets
+ can (and very likely will) vary.
- openPMD states that names of records and their components are only allowed
- to contain the characters ``a-Z``, the numbers ``0-9`` and the underscore
- ``_`` (equivalently, the regex ``\w``). Since yt widely uses the underscore
- in field names, openPMD's underscores (``_``) are replaced by hyphen
- (``-``).
+ openPMD states that names of records and their components are only allowed to contain the
+ characters a-Z,
+ the numbers 0-9
+ and the underscore _
+ (equivalently, the regex \w).
+ Since yt widely uses the underscore in field names, openPMD's underscores (_) are replaced by hyphen (-).
- Derived fields will automatically be set up, if names and units of your
- known on-disk (or manually derived) fields match the ones in the list of
- yt "universal" fields.
+ Derived fields will automatically be set up, if names and units of your known on-disk (or manually derived)
+ fields match the ones in [1].
References
----------
* http://yt-project.org/docs/dev/analyzing/fields.html
* http://yt-project.org/docs/dev/developing/creating_frontend.html#data-meaning-structures
* https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md
-
+ * [1] http://yt-project.org/docs/dev/reference/field_list.html#universal-fields
"""
_mag_fields = []
@@ -139,59 +136,69 @@
bp = ds.base_path
mp = ds.meshes_path
pp = ds.particles_path
- fields = f[bp + mp]
- for fname in fields.keys():
- field = fields[fname]
- if type(field) is h5.Dataset or is_const_component(field):
- # Don't consider axes. This appears to be a vector field of single dimensionality
- ytname = str("_".join([fname.replace("_", "-")]))
- parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
- unit = str(YTQuantity(1, parsed).units)
- aliases = []
- # Save a list of magnetic fields for aliasing later on
- # We can not reasonably infer field type/unit by name in openPMD
- if unit == "T" or unit == "kg/(A*s**2)":
- self._mag_fields.append(ytname)
- self.known_other_fields += ((ytname, (unit, aliases, None)),)
- else:
- for axis in field.keys():
- ytname = str("_".join([fname.replace("_", "-"), axis]))
+ try:
+ fields = f[bp + mp]
+ for fname in fields.keys():
+ field = fields[fname]
+ if type(field) is h5.Dataset or is_const_component(field):
+ # Don't consider axes. This appears to be a vector field of single dimensionality
+ ytname = str("_".join([fname.replace("_", "-")]))
parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
unit = str(YTQuantity(1, parsed).units)
aliases = []
# Save a list of magnetic fields for aliasing later on
- # We can not reasonably infer field type by name in openPMD
+ # We can not reasonably infer field type/unit by name in openPMD
if unit == "T" or unit == "kg/(A*s**2)":
self._mag_fields.append(ytname)
self.known_other_fields += ((ytname, (unit, aliases, None)),)
- for i in self.known_other_fields:
- mylog.debug("open_pmd - known_other_fields - {}".format(i))
- particles = f[bp + pp]
- for species in particles.keys():
- for record in particles[species].keys():
- try:
- pds = particles[species + "/" + record]
- parsed = parse_unit_dimension(pds.attrs["unitDimension"])
- unit = str(YTQuantity(1, parsed).units)
- ytattrib = str(record).replace("_", "-")
- if ytattrib == "position":
- # Symbolically rename position to preserve yt's interpretation of the pfield
- # particle_position is later derived in setup_absolute_positions in the way yt expects it
- ytattrib = "positionCoarse"
- if type(pds) is h5.Dataset or is_const_component(pds):
- name = ["particle", ytattrib]
- self.known_particle_fields += ((str("_".join(name)), (unit, [], None)),)
- else:
- for axis in pds.keys():
- aliases = []
- name = ["particle", ytattrib, axis]
- ytname = str("_".join(name))
- self.known_particle_fields += ((ytname, (unit, aliases, None)),)
- except KeyError:
- mylog.info("open_pmd - {}_{} does not seem to have unitDimension".format(species, record))
- for i in self.known_particle_fields:
- mylog.debug("open_pmd - known_particle_fields - {}".format(i))
+ else:
+ for axis in field.keys():
+ ytname = str("_".join([fname.replace("_", "-"), axis]))
+ parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
+ unit = str(YTQuantity(1, parsed).units)
+ aliases = []
+ # Save a list of magnetic fields for aliasing later on
+ # We can not reasonably infer field type by name in openPMD
+ if unit == "T" or unit == "kg/(A*s**2)":
+ self._mag_fields.append(ytname)
+ self.known_other_fields += ((ytname, (unit, aliases, None)),)
+ for i in self.known_other_fields:
+ mylog.debug("open_pmd - known_other_fields - {}".format(i))
+ except(KeyError):
+ pass
+
+ try:
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ for recname in species.keys():
+ try:
+ record = species[recname]
+ parsed = parse_unit_dimension(record.attrs["unitDimension"])
+ unit = str(YTQuantity(1, parsed).units)
+ ytattrib = str(recname).replace("_", "-")
+ if ytattrib == "position":
+ # Symbolically rename position to preserve yt's interpretation of the pfield
+ # particle_position is later derived in setup_absolute_positions in the way yt expects it
+ ytattrib = "positionCoarse"
+ if type(record) is h5.Dataset or is_const_component(record):
+ name = ["particle", ytattrib]
+ self.known_particle_fields += ((str("_".join(name)), (unit, [], None)),)
+ else:
+ for axis in record.keys():
+ aliases = []
+ name = ["particle", ytattrib, axis]
+ ytname = str("_".join(name))
+ self.known_particle_fields += ((ytname, (unit, aliases, None)),)
+ except(KeyError):
+ if recname != "particlePatches":
+ mylog.info("open_pmd - {}_{} does not seem to have unitDimension".format(pname, recname))
+ for i in self.known_particle_fields:
+ mylog.debug("open_pmd - known_particle_fields - {}".format(i))
+ except(KeyError):
+ pass
+
super(OpenPMDFieldInfo, self).__init__(ds, field_list)
def setup_fluid_fields(self):
diff -r 1d7e99ee5fdcdee5ac15cb7012fe9f467f09c2ec -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 yt/frontends/open_pmd/io.py
--- a/yt/frontends/open_pmd/io.py
+++ b/yt/frontends/open_pmd/io.py
@@ -194,13 +194,14 @@
field = (ftype, fname)
for chunk in chunks:
for grid in chunk.objs:
- component = fname.replace("_", "/").replace("-", "_")
- if component.split("/")[0] not in grid.ftypes:
- continue
mask = grid._get_selector_mask(selector)
if mask is None:
continue
- data = get_component(ds, component, grid.findex, grid.foffset)
+ component = fname.replace("_", "/").replace("-", "_")
+ if component.split("/")[0] not in grid.ftypes:
+ data = np.full(grid.ActiveDimensions, 0, dtype=np.float64)
+ else:
+ data = get_component(ds, component, grid.findex, grid.foffset)
# The following is a modified AMRGridPatch.select(...)
data.shape = mask.shape # Workaround - casts a 2D (x,y) array to 3D (x,y,1)
count = grid.count(selector)
diff -r 1d7e99ee5fdcdee5ac15cb7012fe9f467f09c2ec -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 yt/frontends/open_pmd/misc.py
--- a/yt/frontends/open_pmd/misc.py
+++ b/yt/frontends/open_pmd/misc.py
@@ -46,7 +46,7 @@
'kg**1*s**-2*A**-1'
"""
if len(unit_dimension) is not 7:
- mylog.error("open_pmd - SI must have 7 base dimensions!")
+ mylog.error("SI must have 7 base dimensions!")
unit_dimension = np.asarray(unit_dimension, dtype=np.int)
dim = []
si = ["m",
diff -r 1d7e99ee5fdcdee5ac15cb7012fe9f467f09c2ec -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 yt/frontends/open_pmd/tests/test_outputs.py
--- a/yt/frontends/open_pmd/tests/test_outputs.py
+++ b/yt/frontends/open_pmd/tests/test_outputs.py
@@ -27,6 +27,8 @@
twoD = "example-2d/hdf5/data00000100.h5"
threeD = "example-3d/hdf5/data00000100.h5"
+noFields = "no_fields/data00000400.h5"
+noParticles = "no_particles/data00000400.h5"
@requires_file(threeD)
@@ -139,3 +141,70 @@
3.29025596712e-14 * np.ones_like(ds.current_time))
assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
domain_width)
+
+@requires_file(noFields)
+def test_no_fields_out():
+ ds = data_dir_load(noFields)
+ field_list = [('all', 'particle_charge'),
+ ('all', 'particle_id'),
+ ('all', 'particle_mass'),
+ ('all', 'particle_momentum_x'),
+ ('all', 'particle_momentum_y'),
+ ('all', 'particle_momentum_z'),
+ ('all', 'particle_positionCoarse_x'),
+ ('all', 'particle_positionCoarse_y'),
+ ('all', 'particle_positionCoarse_z'),
+ ('all', 'particle_positionOffset_x'),
+ ('all', 'particle_positionOffset_y'),
+ ('all', 'particle_positionOffset_z'),
+ ('all', 'particle_weighting'),
+ ('io', 'particle_charge'),
+ ('io', 'particle_id'),
+ ('io', 'particle_mass'),
+ ('io', 'particle_momentum_x'),
+ ('io', 'particle_momentum_y'),
+ ('io', 'particle_momentum_z'),
+ ('io', 'particle_positionCoarse_x'),
+ ('io', 'particle_positionCoarse_y'),
+ ('io', 'particle_positionCoarse_z'),
+ ('io', 'particle_positionOffset_x'),
+ ('io', 'particle_positionOffset_y'),
+ ('io', 'particle_positionOffset_z'),
+ ('io', 'particle_weighting')]
+ domain_dimensions = [1, 1, 1] * np.ones_like(ds.domain_dimensions)
+ domain_width = [1, 1, 1] * np.ones_like(ds.domain_left_edge)
+
+ assert isinstance(ds, OpenPMDDataset)
+ assert_equal(str(ds), "data00000400.h5")
+ assert_equal(ds.dimensionality, 3)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert "all" in ds.particle_unions
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert_almost_equal(ds.current_time,
+ 1.3161023868481013e-13 * np.ones_like(ds.current_time))
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
+@requires_file(noParticles)
+def test_no_particles_out():
+ ds = data_dir_load(noParticles)
+ field_list = [('openPMD', 'E_x'),
+ ('openPMD', 'E_y'),
+ ('openPMD', 'E_z'),
+ ('openPMD', 'rho')]
+ domain_dimensions = [51, 201, 1] * np.ones_like(ds.domain_dimensions)
+ domain_width = [3.06e-05, 2.01e-05, 1e+0] * np.ones_like(ds.domain_left_edge)
+
+ assert isinstance(ds, OpenPMDDataset)
+ assert_equal(str(ds), "data00000400.h5")
+ assert_equal(ds.dimensionality, 2)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert "all" not in ds.particle_unions
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert_almost_equal(ds.current_time,
+ 1.3161023868481013e-13 * np.ones_like(ds.current_time))
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
https://bitbucket.org/yt_analysis/yt/commits/1d1bc9fefedc/
Changeset: 1d1bc9fefedc
User: C0nsultant
Date: 2017-12-08 12:45:37+00:00
Summary: Test case for groupBased data.
Affected #: 2 files
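A minimal sketch of how a groupBased file behaves once it loads as an OpenPMDDatasetSeries, assuming the singleParticle/simData.h5 sample used by the new test (101 iterations under /data):

    import yt

    ts = yt.load("singleParticle/simData.h5")  # groupBased -> OpenPMDDatasetSeries
    print(len(ts))      # one entry per iteration under /data; 101 here
    ds = ts[0]          # __getitem__ loads one iteration by number,
                        # provided iteration 0 exists in the file
    for ds in ts:       # __iter__ yields an OpenPMDDataset per iteration
        print(ds.current_time)
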
diff -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 -r 1d1bc9fefedc5f6f0931b88d5a79bb8567af8d63 yt/frontends/open_pmd/data_structures.py
--- a/yt/frontends/open_pmd/data_structures.py
+++ b/yt/frontends/open_pmd/data_structures.py
@@ -460,7 +460,7 @@
self.base_path = "/data/{}/".format(iteration)
self.meshes_path = self._handle["/"].attrs["meshesPath"].decode()
try:
- meshes = handle[self.base_path + self.meshes_path]
+ handle[self.base_path + self.meshes_path]
except(KeyError):
if self.standard_version <= StrictVersion("1.0.1"):
mylog.info("meshesPath not present in file."
@@ -469,7 +469,7 @@
raise
self.particles_path = self._handle["/"].attrs["particlesPath"].decode()
try:
- particles = handle[self.base_path + self.particles_path]
+ handle[self.base_path + self.particles_path]
except(KeyError):
if self.standard_version <= StrictVersion("1.0.1"):
mylog.info("particlesPath not present in file."
diff -r 41e06797db1d3e8a53ea185dee53700bc38b8d45 -r 1d1bc9fefedc5f6f0931b88d5a79bb8567af8d63 yt/frontends/open_pmd/tests/test_outputs.py
--- a/yt/frontends/open_pmd/tests/test_outputs.py
+++ b/yt/frontends/open_pmd/tests/test_outputs.py
@@ -22,6 +22,7 @@
requires_file
from yt.utilities.answer_testing.framework import \
data_dir_load
+from yt.convenience import load
import numpy as np
@@ -29,6 +30,7 @@
threeD = "example-3d/hdf5/data00000100.h5"
noFields = "no_fields/data00000400.h5"
noParticles = "no_particles/data00000400.h5"
+groupBased = "singleParticle/simData.h5"
@requires_file(threeD)
@@ -208,3 +210,49 @@
assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
domain_width)
+@requires_file(groupBased)
+def test_groupBased_out():
+ dss = load(groupBased)
+ field_list = [('all', 'particle_charge'),
+ ('all', 'particle_mass'),
+ ('all', 'particle_momentum_x'),
+ ('all', 'particle_momentum_y'),
+ ('all', 'particle_momentum_z'),
+ ('all', 'particle_positionCoarse_x'),
+ ('all', 'particle_positionCoarse_y'),
+ ('all', 'particle_positionCoarse_z'),
+ ('all', 'particle_positionOffset_x'),
+ ('all', 'particle_positionOffset_y'),
+ ('all', 'particle_positionOffset_z'),
+ ('all', 'particle_weighting'),
+ ('io', 'particle_charge'),
+ ('io', 'particle_mass'),
+ ('io', 'particle_momentum_x'),
+ ('io', 'particle_momentum_y'),
+ ('io', 'particle_momentum_z'),
+ ('io', 'particle_positionCoarse_x'),
+ ('io', 'particle_positionCoarse_y'),
+ ('io', 'particle_positionCoarse_z'),
+ ('io', 'particle_positionOffset_x'),
+ ('io', 'particle_positionOffset_y'),
+ ('io', 'particle_positionOffset_z'),
+ ('io', 'particle_weighting'),
+ ('openPMD', 'J_x'),
+ ('openPMD', 'J_y'),
+ ('openPMD', 'J_z'),
+ ('openPMD', 'e-chargeDensity')]
+ domain_dimensions = [32, 64, 64] * np.ones_like(dss[0].domain_dimensions)
+ domain_width = [0.0002752, 0.0005504, 0.0005504] * np.ones_like(dss[0].domain_left_edge)
+
+ assert_equal(len(dss), 101)
+ for ds in dss:
+ assert_equal(str(ds), "simData.h5")
+ assert_equal(ds.dimensionality, 3)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert ds.current_time >= np.zeros_like(ds.current_time)
+ assert ds.current_time <= 1.6499999999999998e-12 * np.ones_like(ds.current_time)
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
https://bitbucket.org/yt_analysis/yt/commits/ad487a525279/
Changeset: ad487a525279
User: C0nsultant
Date: 2017-12-18 19:28:27+00:00
Summary: Bump the supported openPMD standard version to 1.1.0 for the new behaviour.
Potential fixes for the CI failure: context managers inside _is_valid and manually setting the file close degree.
Affected #: 2 files
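For reference, a standalone sketch of the two changes named above, not the committed code itself: the version gate via distutils' StrictVersion, and _is_valid wrapped in a context manager so the HDF5 handle is closed on every exit path:

    from distutils.version import StrictVersion
    import h5py

    def tolerate_missing_path(standard_version):
        # openPMD files up to standard 1.1.0 may omit meshesPath/particlesPath
        return StrictVersion(standard_version) <= StrictVersion("1.1.0")

    def is_valid(filename):
        try:
            with h5py.File(filename, "r") as f:  # closed even on early return
                if "openPMD" not in f["/"].attrs:
                    return False
                return f.attrs["iterationEncoding"].decode() == "fileBased"
        except (IOError, OSError):
            return False
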
diff -r 1d1bc9fefedc5f6f0931b88d5a79bb8567af8d63 -r ad487a525279431bb9f7e5654784cb8281407fe2 yt/frontends/open_pmd/data_structures.py
--- a/yt/frontends/open_pmd/data_structures.py
+++ b/yt/frontends/open_pmd/data_structures.py
@@ -38,7 +38,8 @@
from yt.utilities.on_demand_imports import _h5py as h5
ompd_known_versions = [StrictVersion("1.0.0"),
- StrictVersion("1.0.1")]
+ StrictVersion("1.0.1"),
+ StrictVersion("1.1.0")]
opmd_required_attributes = ["openPMD", "basePath", "meshesPath", "particlesPath"]
@@ -462,7 +463,7 @@
try:
handle[self.base_path + self.meshes_path]
except(KeyError):
- if self.standard_version <= StrictVersion("1.0.1"):
+ if self.standard_version <= StrictVersion("1.1.0"):
mylog.info("meshesPath not present in file."
" Assuming file contains no meshes and has a domain extent of 1m^3!")
else:
@@ -471,7 +472,7 @@
try:
handle[self.base_path + self.particles_path]
except(KeyError):
- if self.standard_version <= StrictVersion("1.0.1"):
+ if self.standard_version <= StrictVersion("1.1.0"):
mylog.info("particlesPath not present in file."
" Assuming file contains no particles!")
else:
@@ -537,7 +538,7 @@
self.domain_left_edge = np.append(dle, np.zeros(3 - len(dle)))
self.domain_right_edge = np.append(dre, np.ones(3 - len(dre)))
except(KeyError):
- if self.standard_version <= StrictVersion("1.0.1"):
+ if self.standard_version <= StrictVersion("1.1.0"):
self.dimensionality = 3
self.domain_dimensions = np.ones(3, dtype=np.float64)
self.domain_left_edge = np.zeros(3, dtype=np.float64)
@@ -553,26 +554,22 @@
"""
warn_h5py(args[0])
try:
- f = h5.File(args[0], "r")
+ with h5.File(args[0], "r") as f:
+ attrs = list(f["/"].attrs.keys())
+ for i in opmd_required_attributes:
+ if i not in attrs:
+ return False
+
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
+ return False
+
+ if f.attrs["iterationEncoding"].decode() == "fileBased":
+ return True
+
+ return False
except (IOError, OSError, ImportError):
return False
- attrs = list(f["/"].attrs.keys())
- for i in opmd_required_attributes:
- if i not in attrs:
- f.close()
- return False
-
- if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
- f.close()
- return False
-
- if f.attrs["iterationEncoding"].decode() == "fileBased":
- f.close()
- return True
-
- f.close()
- return False
class OpenPMDDatasetSeries(DatasetSeries):
@@ -584,7 +581,12 @@
def __init__(self, filename):
super(OpenPMDDatasetSeries, self).__init__([])
- self.handle = h5.File(filename, "r")
+ # experimental: CI testing failed for OpenPMDGroupBasedDataset with
+ # 'IOError: Unable to open file (File close degree doesn't match)'
+ plist = h5.h5p.create(h5.h5p.FILE_ACCESS)
+ plist.set_fclose_degree(h5.h5f.CLOSE_DEFAULT)
+ self.handle = h5.File(h5.h5f.open(str.encode(filename), h5.h5f.ACC_RDONLY, fapl=plist))
+ #self.handle = h5.File(filename, "r")
self.filename = filename
self._pre_outputs = sorted(np.asarray(list(self.handle["/data"].keys()), dtype=np.int))
@@ -619,23 +621,19 @@
def _is_valid(self, *args, **kwargs):
warn_h5py(args[0])
try:
- f = h5.File(args[0], "r")
+ with h5.File(args[0], "r") as f:
+ attrs = list(f["/"].attrs.keys())
+ for i in opmd_required_attributes:
+ if i not in attrs:
+ return False
+
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
+ return False
+
+ if f.attrs["iterationEncoding"].decode() == "groupBased":
+ return True
+
+ return False
except (IOError, OSError, ImportError):
return False
- attrs = list(f["/"].attrs.keys())
- for i in opmd_required_attributes:
- if i not in attrs:
- f.close()
- return False
-
- if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
- f.close()
- return False
-
- if f.attrs["iterationEncoding"].decode() == "groupBased":
- f.close()
- return True
-
- f.close()
- return False
diff -r 1d1bc9fefedc5f6f0931b88d5a79bb8567af8d63 -r ad487a525279431bb9f7e5654784cb8281407fe2 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -311,6 +311,19 @@
self._h5f = h5f
return self._h5f
+ _h5p = None
+ @property
+ def h5p(self):
+ if self._err:
+ raise self._err
+ if self._h5p is None:
+ try:
+ import h5py.h5p as h5p
+ except ImportError:
+ h5p = NotAModule(self._name)
+ self._h5p = h5p
+ return self._h5p
+
_h5d = None
@property
def h5d(self):
https://bitbucket.org/yt_analysis/yt/commits/195e4bb170a4/
Changeset: 195e4bb170a4
User: C0nsultant
Date: 2018-01-20 05:49:53+00:00
Summary: Revert the experimental file opening, since it was not the cause of the CI failure.
Affected #: 1 file
diff -r ad487a525279431bb9f7e5654784cb8281407fe2 -r 195e4bb170a4ba5608cbab7feac973d3ef4587ba yt/frontends/open_pmd/data_structures.py
--- a/yt/frontends/open_pmd/data_structures.py
+++ b/yt/frontends/open_pmd/data_structures.py
@@ -581,12 +581,7 @@
def __init__(self, filename):
super(OpenPMDDatasetSeries, self).__init__([])
- # experimental: CI testing failed for OpenPMDGroupBasedDataset with
- # 'IOError: Unable to open file (File close degree doesn't match)'
- plist = h5.h5p.create(h5.h5p.FILE_ACCESS)
- plist.set_fclose_degree(h5.h5f.CLOSE_DEFAULT)
- self.handle = h5.File(h5.h5f.open(str.encode(filename), h5.h5f.ACC_RDONLY, fapl=plist))
- #self.handle = h5.File(filename, "r")
+ self.handle = h5.File(filename, "r")
self.filename = filename
self._pre_outputs = sorted(np.asarray(list(self.handle["/data"].keys()), dtype=np.int))
https://bitbucket.org/yt_analysis/yt/commits/e6a7a03a3241/
Changeset: e6a7a03a3241
User: ngoldbaum
Date: 2018-01-20 07:04:26+00:00
Summary: Merge pull request #1645 from C0nsultant/master
Updated and hardened openPMD frontend
Affected #: 6 files
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/frontends/open_pmd/data_structures.py
--- a/yt/frontends/open_pmd/data_structures.py
+++ b/yt/frontends/open_pmd/data_structures.py
@@ -25,6 +25,7 @@
import numpy as np
from yt.data_objects.grid_patch import AMRGridPatch
from yt.data_objects.static_output import Dataset
+from yt.data_objects.time_series import DatasetSeries
from yt.frontends.open_pmd.fields import OpenPMDFieldInfo
from yt.frontends.open_pmd.misc import \
is_const_component, \
@@ -36,6 +37,11 @@
from yt.utilities.logger import ytLogger as mylog
from yt.utilities.on_demand_imports import _h5py as h5
+ompd_known_versions = [StrictVersion("1.0.0"),
+ StrictVersion("1.0.1"),
+ StrictVersion("1.1.0")]
+opmd_required_attributes = ["openPMD", "basePath", "meshesPath", "particlesPath"]
+
class OpenPMDGrid(AMRGridPatch):
"""Represents chunk of data on-disk.
@@ -47,8 +53,8 @@
_id_offset = 0
__slots__ = ["_level_id"]
# Every particle species and mesh might have different hdf5-indices and offsets
- ftypes=[]
- ptypes=[]
+ ftypes = []
+ ptypes = []
findex = 0
foffset = 0
pindex = 0
@@ -98,17 +104,22 @@
f = self.dataset._handle
bp = self.dataset.base_path
pp = self.dataset.particles_path
- for ptype in self.ds.particle_types_raw:
- if str(ptype) == "io":
- spec = list(f[bp + pp].keys())[0]
- else:
- spec = ptype
- axis = list(f[bp + pp + "/" + spec + "/position"].keys())[0]
- pos = f[bp + pp + "/" + spec + "/position/" + axis]
- if is_const_component(pos):
- result[ptype] = pos.attrs["shape"]
- else:
- result[ptype] = pos.len()
+
+ try:
+ for ptype in self.ds.particle_types_raw:
+ if str(ptype) == "io":
+ spec = list(f[bp + pp].keys())[0]
+ else:
+ spec = ptype
+ axis = list(f[bp + pp + "/" + spec + "/position"].keys())[0]
+ pos = f[bp + pp + "/" + spec + "/position/" + axis]
+ if is_const_component(pos):
+ result[ptype] = pos.attrs["shape"]
+ else:
+ result[ptype] = pos.len()
+ except(KeyError):
+ result["io"] = 0
+
return result
def _detect_output_fields(self):
@@ -127,46 +138,50 @@
mesh_fields = []
try:
- for field in f[bp + mp].keys():
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
try:
- for axis in f[bp + mp + field].keys():
- mesh_fields.append(field.replace("_", "-")
+ mesh = meshes[mname]
+ for axis in mesh.keys():
+ mesh_fields.append(mname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# This is a h5.Dataset (i.e. no axes)
- mesh_fields.append(field.replace("_", "-"))
- except KeyError:
- # There are no mesh fields
+ mesh_fields.append(mname.replace("_", "-"))
+ except(KeyError):
pass
self.field_list = [("openPMD", str(field)) for field in mesh_fields]
particle_fields = []
try:
- for species in f[bp + pp].keys():
- for record in f[bp + pp + species].keys():
- if is_const_component(f[bp + pp + species + "/" + record]):
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ for recname in species.keys():
+ record = species[recname]
+ if is_const_component(record):
# Record itself (e.g. particle_mass) is constant
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-"))
- elif "particlePatches" not in record:
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-"))
+ elif "particlePatches" not in recname:
try:
# Create a field for every axis (x,y,z) of every property (position)
# of every species (electrons)
- axes = list(f[bp + pp + species + "/" + record].keys())
- if str(record) == "position":
- record = "positionCoarse"
+ axes = list(record.keys())
+ if str(recname) == "position":
+ recname = "positionCoarse"
for axis in axes:
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-")
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-")
+ "_" + axis)
except AttributeError:
# Record is a dataset, does not have axes (e.g. weighting)
- particle_fields.append(species.replace("_", "-")
- + "_" + record.replace("_", "-"))
+ particle_fields.append(pname.replace("_", "-")
+ + "_" + recname.replace("_", "-"))
pass
else:
pass
- if len(list(f[bp + pp].keys())) > 1:
+ if len(list(particles.keys())) > 1:
# There is more than one particle species, use the specific names as field types
self.field_list.extend(
[(str(field).split("_")[0],
@@ -176,8 +191,7 @@
self.field_list.extend(
[("io",
("particle_" + "_".join(str(field).split("_")[1:]))) for field in particle_fields])
- except KeyError:
- # There are no particle fields
+ except(KeyError):
pass
def _count_grids(self):
@@ -195,25 +209,36 @@
self.num_grids = 0
- for mesh in f[bp + mp].keys():
- if type(f[bp + mp + mesh]) is h5.Group:
- shape = f[bp + mp + mesh + "/" + list(f[bp + mp + mesh].keys())[0]].shape
- else:
- shape = f[bp + mp + mesh].shape
- spacing = tuple(f[bp + mp + mesh].attrs["gridSpacing"])
- offset = tuple(f[bp + mp + mesh].attrs["gridGlobalOffset"])
- unit_si = f[bp + mp + mesh].attrs["gridUnitSI"]
- self.meshshapes[mesh] = (shape, spacing, offset, unit_si)
- for species in f[bp + pp].keys():
- if "particlePatches" in f[bp + pp + "/" + species].keys():
- for (patch, size) in enumerate(f[bp + pp + "/" + species + "/particlePatches/numParticles"]):
- self.numparts[species + "#" + str(patch)] = size
- else:
- axis = list(f[bp + pp + species + "/position"].keys())[0]
- if is_const_component(f[bp + pp + species + "/position/" + axis]):
- self.numparts[species] = f[bp + pp + species + "/position/" + axis].attrs["shape"]
+ try:
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
+ mesh = meshes[mname]
+ if type(mesh) is h5.Group:
+ shape = mesh[list(mesh.keys())[0]].shape
else:
- self.numparts[species] = f[bp + pp + species + "/position/" + axis].len()
+ shape = mesh.shape
+ spacing = tuple(mesh.attrs["gridSpacing"])
+ offset = tuple(mesh.attrs["gridGlobalOffset"])
+ unit_si = mesh.attrs["gridUnitSI"]
+ self.meshshapes[mname] = (shape, spacing, offset, unit_si)
+ except(KeyError):
+ pass
+
+ try:
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ if "particlePatches" in species.keys():
+ for (patch, size) in enumerate(species["/particlePatches/numParticles"]):
+ self.numparts[pname + "#" + str(patch)] = size
+ else:
+ axis = list(species["/position"].keys())[0]
+ if is_const_component(species["/position/" + axis]):
+ self.numparts[pname] = species["/position/" + axis].attrs["shape"]
+ else:
+ self.numparts[pname] = species["/position/" + axis].len()
+ except(KeyError):
+ pass
# Limit values per grid by resulting memory footprint
self.vpg = int(self.dataset.gridsize / 4) # 4Byte per value (f32)
@@ -231,9 +256,9 @@
else:
no_patches[k] = v
for size in set(no_patches.values()):
- self.num_grids += int(np.ceil(size * self.vpg ** -1))
+ self.num_grids += int(np.ceil(size * self.vpg**-1))
for size in patches.values():
- self.num_grids += int(np.ceil(size * self.vpg ** -1))
+ self.num_grids += int(np.ceil(size * self.vpg**-1))
def _parse_index(self):
"""Fills each grid with appropriate properties (extent, dimensions, ...)
@@ -266,13 +291,13 @@
domain_dimension = np.asarray(shape, dtype=np.int32)
domain_dimension = np.append(domain_dimension, np.ones(3 - len(domain_dimension)))
# Number of grids of this shape
- num_grids = min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg ** -1)))
+ num_grids = min(shape[0], int(np.ceil(reduce(mul, shape) * self.vpg**-1)))
gle = offset * unit_si # self.dataset.domain_left_edge
gre = domain_dimension[:spacing.size] * unit_si * spacing + gle # self.dataset.domain_right_edge
gle = np.append(gle, np.zeros(3 - len(gle)))
gre = np.append(gre, np.ones(3 - len(gre)))
grid_dim_offset = np.linspace(0, domain_dimension[0], num_grids + 1, dtype=np.int32)
- grid_edge_offset = grid_dim_offset * np.float(domain_dimension[0]) ** -1 * (gre[0] - gle[0]) + gle[0]
+ grid_edge_offset = grid_dim_offset * np.float(domain_dimension[0])**-1 * (gre[0] - gle[0]) + gle[0]
mesh_names = []
for (mname, mdata) in self.meshshapes.items():
if mesh == mdata:
@@ -301,7 +326,10 @@
# This is a particlePatch
spec = species.split("#")
patch = f[bp + pp + "/" + spec[0] + "/particlePatches"]
- num_grids = int(np.ceil(count * self.vpg ** -1))
+ domain_dimension = np.ones(3, dtype=np.int32)
+ for (ind, axis) in enumerate(list(patch["extent"].keys())):
+ domain_dimension[ind] = patch["extent/" + axis].value[int(spec[1])]
+ num_grids = int(np.ceil(count * self.vpg**-1))
gle = []
for axis in patch["offset"].keys():
gle.append(get_component(patch, "offset/" + axis, int(spec[1]), 1)[0])
@@ -318,7 +346,8 @@
dtype=np.int32)
particle_names = [str(spec[0])]
elif str(species) not in handled_ptypes:
- num_grids = int(np.ceil(count * self.vpg ** -1))
+ domain_dimension = self.dataset.domain_dimensions
+ num_grids = int(np.ceil(count * self.vpg**-1))
gle = self.dataset.domain_left_edge
gre = self.dataset.domain_right_edge
particle_count = np.linspace(0, count, num_grids + 1, dtype=np.int32)
@@ -332,7 +361,7 @@
# A grid with this exact particle count has already been created
continue
for grid in np.arange(num_grids):
- self.grid_dimensions[grid_index_total] = [0, 0, 0] # Counted as mesh-size, thus no dimensional extent
+ self.grid_dimensions[grid_index_total] = domain_dimension
self.grid_left_edge[grid_index_total] = gle
self.grid_right_edge[grid_index_total] = gre
self.grid_particle_count[grid_index_total] = (particle_count[grid + 1] - particle_count[grid]) * len(
@@ -376,7 +405,9 @@
**kwargs):
self._handle = HDF5FileHandler(filename)
self.gridsize = kwargs.pop("open_pmd_virtual_gridsize", 10**9)
- self._set_paths(self._handle, path.dirname(filename))
+ self.standard_version = StrictVersion(self._handle.attrs["openPMD"].decode())
+ self.iteration = kwargs.pop("iteration", None)
+ self._set_paths(self._handle, path.dirname(filename), self.iteration)
Dataset.__init__(self,
filename,
dataset_type,
@@ -384,15 +415,18 @@
unit_system=unit_system)
self.storage_filename = storage_filename
self.fluid_types += ("openPMD",)
- particles = tuple(str(c) for c in self._handle[self.base_path + self.particles_path].keys())
- if len(particles) > 1:
- # Only use on-disk particle names if there is more than one species
- self.particle_types = particles
- mylog.debug("open_pmd - self.particle_types: {}".format(self.particle_types))
- self.particle_types_raw = self.particle_types
- self.particle_types = tuple(self.particle_types)
+ try:
+ particles = tuple(str(c) for c in self._handle[self.base_path + self.particles_path].keys())
+ if len(particles) > 1:
+ # Only use on-disk particle names if there is more than one species
+ self.particle_types = particles
+ mylog.debug("self.particle_types: {}".format(self.particle_types))
+ self.particle_types_raw = self.particle_types
+ self.particle_types = tuple(self.particle_types)
+ except(KeyError):
+ pass
- def _set_paths(self, handle, path):
+ def _set_paths(self, handle, path, iteration):
"""Parses relevant hdf5-paths out of ``handle``.
Parameters
@@ -402,29 +436,47 @@
(absolute) filepath for current hdf5 container
"""
iterations = []
+ if iteration is None:
+ iteration = list(handle["/data"].keys())[0]
encoding = handle.attrs["iterationEncoding"].decode()
if "groupBased" in encoding:
iterations = list(handle["/data"].keys())
- mylog.info("open_pmd - found {} iterations in file".format(len(iterations)))
+ mylog.info("Found {} iterations in file".format(len(iterations)))
elif "fileBased" in encoding:
itformat = handle.attrs["iterationFormat"].decode().split("/")[-1]
regex = "^" + itformat.replace("%T", "[0-9]+") + "$"
if path is "":
- mylog.warning("open_pmd - For file based iterations, please use absolute file paths!")
+ mylog.warning("For file based iterations, please use absolute file paths!")
pass
for filename in listdir(path):
if match(regex, filename):
iterations.append(filename)
- mylog.info("open_pmd - found {} iterations in directory".format(len(iterations)))
+ mylog.info("Found {} iterations in directory".format(len(iterations)))
if len(iterations) == 0:
- mylog.warning("open_pmd - no iterations found!")
+ mylog.warning("No iterations found!")
if "groupBased" in encoding and len(iterations) > 1:
- mylog.warning("open_pmd - only choose to load one iteration ({})".format(list(handle["/data"].keys())[0]))
+ mylog.warning("Only chose to load one iteration ({})".format(iteration))
- self.base_path = "/data/{}/".format(list(handle["/data"].keys())[0])
+ self.base_path = "/data/{}/".format(iteration)
self.meshes_path = self._handle["/"].attrs["meshesPath"].decode()
+ try:
+ handle[self.base_path + self.meshes_path]
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.1.0"):
+ mylog.info("meshesPath not present in file."
+ " Assuming file contains no meshes and has a domain extent of 1m^3!")
+ else:
+ raise
self.particles_path = self._handle["/"].attrs["particlesPath"].decode()
+ try:
+ handle[self.base_path + self.particles_path]
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.1.0"):
+ mylog.info("particlesPath not present in file."
+ " Assuming file contains no particles!")
+ else:
+ raise
def _set_code_unit_attributes(self):
"""Handle conversion between different physical units and the code units.
@@ -455,19 +507,21 @@
shapes = {}
left_edges = {}
right_edges = {}
- for mesh in f[bp + mp].keys():
- if type(f[bp + mp + mesh]) is h5.Group:
- shape = np.asarray(f[bp + mp + mesh + "/" + list(f[bp + mp + mesh].keys())[0]].shape)
+ meshes = f[bp + mp]
+ for mname in meshes.keys():
+ mesh = meshes[mname]
+ if type(mesh) is h5.Group:
+ shape = np.asarray(mesh[list(mesh.keys())[0]].shape)
else:
- shapes[mesh] = np.asarray(f[bp + mp + mesh].shape)
- spacing = np.asarray(f[bp + mp + mesh].attrs["gridSpacing"])
- offset = np.asarray(f[bp + mp + mesh].attrs["gridGlobalOffset"])
- unit_si = np.asarray(f[bp + mp + mesh].attrs["gridUnitSI"])
+ shape = np.asarray(mesh.shape)
+ spacing = np.asarray(mesh.attrs["gridSpacing"])
+ offset = np.asarray(mesh.attrs["gridGlobalOffset"])
+ unit_si = np.asarray(mesh.attrs["gridUnitSI"])
le = offset * unit_si
re = le + shape * unit_si * spacing
- shapes[mesh] = shape
- left_edges[mesh] = le
- right_edges[mesh] = re
+ shapes[mname] = shape
+ left_edges[mname] = le
+ right_edges[mname] = re
lowest_dim = np.min([len(i) for i in shapes.values()])
shapes = np.asarray([i[:lowest_dim] for i in shapes.values()])
left_edges = np.asarray([i[:lowest_dim] for i in left_edges.values()])
@@ -483,12 +537,14 @@
self.domain_dimensions = np.append(fs, np.ones(3 - self.dimensionality))
self.domain_left_edge = np.append(dle, np.zeros(3 - len(dle)))
self.domain_right_edge = np.append(dre, np.ones(3 - len(dre)))
- except ValueError:
- mylog.warning("open_pmd - It seems your data does not contain meshes. Assuming domain extent of 1m^3!")
- self.dimensionality = 3
- self.domain_dimensions = np.ones(3, dtype=np.float64)
- self.domain_left_edge = np.zeros(3, dtype=np.float64)
- self.domain_right_edge = np.ones(3, dtype=np.float64)
+ except(KeyError):
+ if self.standard_version <= StrictVersion("1.1.0"):
+ self.dimensionality = 3
+ self.domain_dimensions = np.ones(3, dtype=np.float64)
+ self.domain_left_edge = np.zeros(3, dtype=np.float64)
+ self.domain_right_edge = np.ones(3, dtype=np.float64)
+ else:
+ raise
self.current_time = f[bp].attrs["time"] * f[bp].attrs["timeUnitSI"]
@@ -498,22 +554,81 @@
"""
warn_h5py(args[0])
try:
- f = h5.File(args[0], "r")
+ with h5.File(args[0], "r") as f:
+ attrs = list(f["/"].attrs.keys())
+ for i in opmd_required_attributes:
+ if i not in attrs:
+ return False
+
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
+ return False
+
+ if f.attrs["iterationEncoding"].decode() == "fileBased":
+ return True
+
+ return False
except (IOError, OSError, ImportError):
return False
- requirements = ["openPMD", "basePath", "meshesPath", "particlesPath"]
- attrs = list(f["/"].attrs.keys())
- for i in requirements:
- if i not in attrs:
- f.close()
- return False
+
+
+class OpenPMDDatasetSeries(DatasetSeries):
+ _pre_outputs = ()
+ _dataset_cls = OpenPMDDataset
+ parallel = True
+ setup_function = None
+ mixed_dataset_types = False
+
+ def __init__(self, filename):
+ super(OpenPMDDatasetSeries, self).__init__([])
+ self.handle = h5.File(filename, "r")
+ self.filename = filename
+ self._pre_outputs = sorted(np.asarray(list(self.handle["/data"].keys()), dtype=np.int))
+
+ def __iter__(self):
+ for it in self._pre_outputs:
+ ds = self._load(it, **self.kwargs)
+ self._setup_function(ds)
+ yield ds
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ o = self._load(key)
+ self._setup_function(o)
+ return o
+ else:
+ raise KeyError("Unknown iteration {}".format(key))
- known_versions = [StrictVersion("1.0.0"),
- StrictVersion("1.0.1")]
- if StrictVersion(f.attrs["openPMD"].decode()) in known_versions:
- f.close()
- return True
- else:
- f.close()
+ def _load(self, it, **kwargs):
+ return OpenPMDDataset(self.filename, iteration=it)
+
+
+class OpenPMDGroupBasedDataset(Dataset):
+ _index_class = OpenPMDHierarchy
+ _field_info_class = OpenPMDFieldInfo
+
+ def __new__(cls, *args, **kwargs):
+ ret = object.__new__(OpenPMDDatasetSeries)
+ ret.__init__(args[0])
+ return ret
+
+ @classmethod
+ def _is_valid(self, *args, **kwargs):
+ warn_h5py(args[0])
+ try:
+ with h5.File(args[0], "r") as f:
+ attrs = list(f["/"].attrs.keys())
+ for i in opmd_required_attributes:
+ if i not in attrs:
+ return False
+
+ if StrictVersion(f.attrs["openPMD"].decode()) not in ompd_known_versions:
+ return False
+
+ if f.attrs["iterationEncoding"].decode() == "groupBased":
+ return True
+
+ return False
+ except (IOError, OSError, ImportError):
return False
+
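For orientation: with the classes above, a groupBased file now opens as an OpenPMDDatasetSeries instead of a single dataset. A minimal usage sketch, mirroring the new test_groupBased_out test further down (the sample path singleParticle/simData.h5 is that test's fixture):
    from yt.convenience import load
    ts = load("singleParticle/simData.h5")  # groupBased -> OpenPMDDatasetSeries via __new__
    print(len(ts))                          # one entry per iteration under /data
    ds = ts[0]                              # an int key loads that iteration as an OpenPMDDataset
    for ds in ts:                           # __iter__ yields the iterations in sorted order
        print(ds.current_time)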
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/frontends/open_pmd/fields.py
--- a/yt/frontends/open_pmd/fields.py
+++ b/yt/frontends/open_pmd/fields.py
@@ -98,39 +98,36 @@
class OpenPMDFieldInfo(FieldInfoContainer):
"""Specifies which fields from the dataset yt should know about.
- ``self.known_other_fields`` and ``self.known_particle_fields`` must be
- populated. Entries for both of these lists must be tuples of the form
- ``("name", ("units", ["fields", "to", "alias"], "display_name"))``. These
- fields will be represented and handled in yt in the way you define them
- here. The fields defined in both ``self.known_other_fields`` and
- ``self.known_particle_fields`` will only be added to a dataset (with units,
- aliases, etc), if they match any entry in the ``OpenPMDHierarchy``'s
- ``self.field_list``.
+ ``self.known_other_fields`` and ``self.known_particle_fields`` must be populated.
+ Entries for both of these lists must be tuples of the form
+ ("name", ("units", ["fields", "to", "alias"], "display_name"))
+ These fields will be represented and handled in yt in the way you define them here.
+ The fields defined in both ``self.known_other_fields`` and ``self.known_particle_fields`` will only be added
+ to a dataset (with units, aliases, etc), if they match any entry in the ``OpenPMDHierarchy``'s ``self.field_list``.
Notes
-----
- Contrary to many other frontends, we dynamically obtain the known fields
- from the simulation output. The openPMD markup is extremely flexible -
- names, dimensions and the number of individual datasets can (and very likely
- will) vary.
+ Contrary to many other frontends, we dynamically obtain the known fields from the simulation output.
+ The openPMD markup is extremely flexible - names, dimensions and the number of individual datasets
+ can (and very likely will) vary.
- openPMD states that names of records and their components are only allowed
- to contain the characters ``a-Z``, the numbers ``0-9`` and the underscore
- ``_`` (equivalently, the regex ``\w``). Since yt widely uses the underscore
- in field names, openPMD's underscores (``_``) are replaced by hyphen
- (``-``).
+ openPMD states that names of records and their components are only allowed to contain the
+ characters a-Z,
+ the numbers 0-9
+ and the underscore _
+ (equivalently, the regex \w).
+ Since yt widely uses the underscore in field names, openPMD's underscores (_) are replaced by hyphen (-).
- Derived fields will automatically be set up, if names and units of your
- known on-disk (or manually derived) fields match the ones in the list of
- yt "universal" fields.
+ Derived fields will automatically be set up, if names and units of your known on-disk (or manually derived)
+ fields match the ones in [1].
References
----------
* http://yt-project.org/docs/dev/analyzing/fields.html
* http://yt-project.org/docs/dev/developing/creating_frontend.html#data-meaning-structures
* https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md
-
+ * [1] http://yt-project.org/docs/dev/reference/field_list.html#universal-fields
"""
_mag_fields = []
@@ -139,59 +136,69 @@
bp = ds.base_path
mp = ds.meshes_path
pp = ds.particles_path
- fields = f[bp + mp]
- for fname in fields.keys():
- field = fields[fname]
- if type(field) is h5.Dataset or is_const_component(field):
- # Don't consider axes. This appears to be a vector field of single dimensionality
- ytname = str("_".join([fname.replace("_", "-")]))
- parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
- unit = str(YTQuantity(1, parsed).units)
- aliases = []
- # Save a list of magnetic fields for aliasing later on
- # We can not reasonably infer field type/unit by name in openPMD
- if unit == "T" or unit == "kg/(A*s**2)":
- self._mag_fields.append(ytname)
- self.known_other_fields += ((ytname, (unit, aliases, None)),)
- else:
- for axis in field.keys():
- ytname = str("_".join([fname.replace("_", "-"), axis]))
+ try:
+ fields = f[bp + mp]
+ for fname in fields.keys():
+ field = fields[fname]
+ if type(field) is h5.Dataset or is_const_component(field):
+ # Don't consider axes. This appears to be a vector field of single dimensionality
+ ytname = str("_".join([fname.replace("_", "-")]))
parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
unit = str(YTQuantity(1, parsed).units)
aliases = []
# Save a list of magnetic fields for aliasing later on
- # We can not reasonably infer field type by name in openPMD
+ # We can not reasonably infer field type/unit by name in openPMD
if unit == "T" or unit == "kg/(A*s**2)":
self._mag_fields.append(ytname)
self.known_other_fields += ((ytname, (unit, aliases, None)),)
- for i in self.known_other_fields:
- mylog.debug("open_pmd - known_other_fields - {}".format(i))
- particles = f[bp + pp]
- for species in particles.keys():
- for record in particles[species].keys():
- try:
- pds = particles[species + "/" + record]
- parsed = parse_unit_dimension(pds.attrs["unitDimension"])
- unit = str(YTQuantity(1, parsed).units)
- ytattrib = str(record).replace("_", "-")
- if ytattrib == "position":
- # Symbolically rename position to preserve yt's interpretation of the pfield
- # particle_position is later derived in setup_absolute_positions in the way yt expects it
- ytattrib = "positionCoarse"
- if type(pds) is h5.Dataset or is_const_component(pds):
- name = ["particle", ytattrib]
- self.known_particle_fields += ((str("_".join(name)), (unit, [], None)),)
- else:
- for axis in pds.keys():
- aliases = []
- name = ["particle", ytattrib, axis]
- ytname = str("_".join(name))
- self.known_particle_fields += ((ytname, (unit, aliases, None)),)
- except KeyError:
- mylog.info("open_pmd - {}_{} does not seem to have unitDimension".format(species, record))
- for i in self.known_particle_fields:
- mylog.debug("open_pmd - known_particle_fields - {}".format(i))
+ else:
+ for axis in field.keys():
+ ytname = str("_".join([fname.replace("_", "-"), axis]))
+ parsed = parse_unit_dimension(np.asarray(field.attrs["unitDimension"], dtype=np.int))
+ unit = str(YTQuantity(1, parsed).units)
+ aliases = []
+ # Save a list of magnetic fields for aliasing later on
+ # We can not reasonably infer field type by name in openPMD
+ if unit == "T" or unit == "kg/(A*s**2)":
+ self._mag_fields.append(ytname)
+ self.known_other_fields += ((ytname, (unit, aliases, None)),)
+ for i in self.known_other_fields:
+ mylog.debug("open_pmd - known_other_fields - {}".format(i))
+ except(KeyError):
+ pass
+
+ try:
+ particles = f[bp + pp]
+ for pname in particles.keys():
+ species = particles[pname]
+ for recname in species.keys():
+ try:
+ record = species[recname]
+ parsed = parse_unit_dimension(record.attrs["unitDimension"])
+ unit = str(YTQuantity(1, parsed).units)
+ ytattrib = str(recname).replace("_", "-")
+ if ytattrib == "position":
+ # Symbolically rename position to preserve yt's interpretation of the pfield
+ # particle_position is later derived in setup_absolute_positions in the way yt expects it
+ ytattrib = "positionCoarse"
+ if type(record) is h5.Dataset or is_const_component(record):
+ name = ["particle", ytattrib]
+ self.known_particle_fields += ((str("_".join(name)), (unit, [], None)),)
+ else:
+ for axis in record.keys():
+ aliases = []
+ name = ["particle", ytattrib, axis]
+ ytname = str("_".join(name))
+ self.known_particle_fields += ((ytname, (unit, aliases, None)),)
+ except(KeyError):
+ if recname != "particlePatches":
+ mylog.info("open_pmd - {}_{} does not seem to have unitDimension".format(pname, recname))
+ for i in self.known_particle_fields:
+ mylog.debug("open_pmd - known_particle_fields - {}".format(i))
+ except(KeyError):
+ pass
+
super(OpenPMDFieldInfo, self).__init__(ds, field_list)
def setup_fluid_fields(self):
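As the reworked docstring spells out, every entry appended to self.known_other_fields or self.known_particle_fields is a nested tuple. A hedged illustration of that shape (the field name and unit below are made up for the example, not read from a file):
    # ("name", ("units", ["fields", "to", "alias"], "display_name"))
    self.known_other_fields += (("E_x", ("V/m", [], None)),)
    self.known_particle_fields += (("particle_positionCoarse_x", ("m", [], None)),)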
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/frontends/open_pmd/io.py
--- a/yt/frontends/open_pmd/io.py
+++ b/yt/frontends/open_pmd/io.py
@@ -194,13 +194,14 @@
field = (ftype, fname)
for chunk in chunks:
for grid in chunk.objs:
- component = fname.replace("_", "/").replace("-", "_")
- if component.split("/")[0] not in grid.ftypes:
- continue
mask = grid._get_selector_mask(selector)
if mask is None:
continue
- data = get_component(ds, component, grid.findex, grid.foffset)
+ component = fname.replace("_", "/").replace("-", "_")
+ if component.split("/")[0] not in grid.ftypes:
+ data = np.full(grid.ActiveDimensions, 0, dtype=np.float64)
+ else:
+ data = get_component(ds, component, grid.findex, grid.foffset)
# The following is a modified AMRGridPatch.select(...)
data.shape = mask.shape # Workaround - casts a 2D (x,y) array to 3D (x,y,1)
count = grid.count(selector)
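The io.py hunk above changes the fallback for grids that lack a requested component: instead of skipping the grid, the reader now substitutes a zero block of the grid's shape, so selections stay fully populated. Roughly (the shape is illustrative):
    import numpy as np
    # stands in for get_component(...) when this grid has no such component on disk
    data = np.full((16, 16, 1), 0, dtype=np.float64)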
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/frontends/open_pmd/misc.py
--- a/yt/frontends/open_pmd/misc.py
+++ b/yt/frontends/open_pmd/misc.py
@@ -46,7 +46,7 @@
'kg**1*s**-2*A**-1'
"""
if len(unit_dimension) is not 7:
- mylog.error("open_pmd - SI must have 7 base dimensions!")
+ mylog.error("SI must have 7 base dimensions!")
unit_dimension = np.asarray(unit_dimension, dtype=np.int)
dim = []
si = ["m",
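For reference, parse_unit_dimension consumes openPMD's seven-exponent unitDimension attribute, ordered as the SI base units m, kg, s, A, K, mol, cd per the openPMD standard. A sketch reproducing the tesla example quoted in the docstring context above:
    import numpy as np
    from yt.frontends.open_pmd.misc import parse_unit_dimension
    # a magnetic field in tesla is kg * s**-2 * A**-1
    parse_unit_dimension(np.asarray([0, 1, -2, -1, 0, 0, 0], dtype=np.int))
    # -> 'kg**1*s**-2*A**-1'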
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/frontends/open_pmd/tests/test_outputs.py
--- a/yt/frontends/open_pmd/tests/test_outputs.py
+++ b/yt/frontends/open_pmd/tests/test_outputs.py
@@ -22,11 +22,15 @@
requires_file
from yt.utilities.answer_testing.framework import \
data_dir_load
+from yt.convenience import load
import numpy as np
twoD = "example-2d/hdf5/data00000100.h5"
threeD = "example-3d/hdf5/data00000100.h5"
+noFields = "no_fields/data00000400.h5"
+noParticles = "no_particles/data00000400.h5"
+groupBased = "singleParticle/simData.h5"
@requires_file(threeD)
@@ -139,3 +143,116 @@
3.29025596712e-14 * np.ones_like(ds.current_time))
assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
domain_width)
+
+@requires_file(noFields)
+def test_no_fields_out():
+ ds = data_dir_load(noFields)
+ field_list = [('all', 'particle_charge'),
+ ('all', 'particle_id'),
+ ('all', 'particle_mass'),
+ ('all', 'particle_momentum_x'),
+ ('all', 'particle_momentum_y'),
+ ('all', 'particle_momentum_z'),
+ ('all', 'particle_positionCoarse_x'),
+ ('all', 'particle_positionCoarse_y'),
+ ('all', 'particle_positionCoarse_z'),
+ ('all', 'particle_positionOffset_x'),
+ ('all', 'particle_positionOffset_y'),
+ ('all', 'particle_positionOffset_z'),
+ ('all', 'particle_weighting'),
+ ('io', 'particle_charge'),
+ ('io', 'particle_id'),
+ ('io', 'particle_mass'),
+ ('io', 'particle_momentum_x'),
+ ('io', 'particle_momentum_y'),
+ ('io', 'particle_momentum_z'),
+ ('io', 'particle_positionCoarse_x'),
+ ('io', 'particle_positionCoarse_y'),
+ ('io', 'particle_positionCoarse_z'),
+ ('io', 'particle_positionOffset_x'),
+ ('io', 'particle_positionOffset_y'),
+ ('io', 'particle_positionOffset_z'),
+ ('io', 'particle_weighting')]
+ domain_dimensions = [1, 1, 1] * np.ones_like(ds.domain_dimensions)
+ domain_width = [1, 1, 1] * np.ones_like(ds.domain_left_edge)
+
+ assert isinstance(ds, OpenPMDDataset)
+ assert_equal(str(ds), "data00000400.h5")
+ assert_equal(ds.dimensionality, 3)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert "all" in ds.particle_unions
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert_almost_equal(ds.current_time,
+ 1.3161023868481013e-13 * np.ones_like(ds.current_time))
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
+@requires_file(noParticles)
+def test_no_particles_out():
+ ds = data_dir_load(noParticles)
+ field_list = [('openPMD', 'E_x'),
+ ('openPMD', 'E_y'),
+ ('openPMD', 'E_z'),
+ ('openPMD', 'rho')]
+ domain_dimensions = [51, 201, 1] * np.ones_like(ds.domain_dimensions)
+ domain_width = [3.06e-05, 2.01e-05, 1e+0] * np.ones_like(ds.domain_left_edge)
+
+ assert isinstance(ds, OpenPMDDataset)
+ assert_equal(str(ds), "data00000400.h5")
+ assert_equal(ds.dimensionality, 2)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert "all" not in ds.particle_unions
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert_almost_equal(ds.current_time,
+ 1.3161023868481013e-13 * np.ones_like(ds.current_time))
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
+@requires_file(groupBased)
+def test_groupBased_out():
+ dss = load(groupBased)
+ field_list = [('all', 'particle_charge'),
+ ('all', 'particle_mass'),
+ ('all', 'particle_momentum_x'),
+ ('all', 'particle_momentum_y'),
+ ('all', 'particle_momentum_z'),
+ ('all', 'particle_positionCoarse_x'),
+ ('all', 'particle_positionCoarse_y'),
+ ('all', 'particle_positionCoarse_z'),
+ ('all', 'particle_positionOffset_x'),
+ ('all', 'particle_positionOffset_y'),
+ ('all', 'particle_positionOffset_z'),
+ ('all', 'particle_weighting'),
+ ('io', 'particle_charge'),
+ ('io', 'particle_mass'),
+ ('io', 'particle_momentum_x'),
+ ('io', 'particle_momentum_y'),
+ ('io', 'particle_momentum_z'),
+ ('io', 'particle_positionCoarse_x'),
+ ('io', 'particle_positionCoarse_y'),
+ ('io', 'particle_positionCoarse_z'),
+ ('io', 'particle_positionOffset_x'),
+ ('io', 'particle_positionOffset_y'),
+ ('io', 'particle_positionOffset_z'),
+ ('io', 'particle_weighting'),
+ ('openPMD', 'J_x'),
+ ('openPMD', 'J_y'),
+ ('openPMD', 'J_z'),
+ ('openPMD', 'e-chargeDensity')]
+ domain_dimensions = [32, 64, 64] * np.ones_like(dss[0].domain_dimensions)
+ domain_width = [0.0002752, 0.0005504, 0.0005504] * np.ones_like(dss[0].domain_left_edge)
+
+ assert_equal(len(dss), 101)
+ for ds in dss:
+ assert_equal(str(ds), "simData.h5")
+ assert_equal(ds.dimensionality, 3)
+ assert_equal(ds.particle_types_raw, ('io', ))
+ assert_array_equal(ds.field_list, field_list)
+ assert_array_equal(ds.domain_dimensions, domain_dimensions)
+ assert ds.current_time >= np.zeros_like(ds.current_time)
+ assert ds.current_time <= 1.6499999999999998e-12 * np.ones_like(ds.current_time)
+ assert_almost_equal(ds.domain_right_edge - ds.domain_left_edge,
+ domain_width)
+
diff -r 1421ce71a149255f8179332369c3d5a4abe4dd7c -r e6a7a03a324126a91b9911a7e416dba39608cbe1 yt/utilities/on_demand_imports.py
--- a/yt/utilities/on_demand_imports.py
+++ b/yt/utilities/on_demand_imports.py
@@ -311,6 +311,19 @@
self._h5f = h5f
return self._h5f
+ _h5p = None
+ @property
+ def h5p(self):
+ if self._err:
+ raise self._err
+ if self._h5p is None:
+ try:
+ import h5py.h5p as h5p
+ except ImportError:
+ h5p = NotAModule(self._name)
+ self._h5p = h5p
+ return self._h5p
+
_h5d = None
@property
def h5d(self):
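The new h5p property follows the same lazy-import pattern as the neighbouring h5f and h5d properties: the submodule is imported on first access, cached in _h5p, and replaced by a NotAModule placeholder when h5py is unavailable. Usage matches the alias the frontend already imports:
    from yt.utilities.on_demand_imports import _h5py as h5
    h5p = h5.h5p  # first access runs `import h5py.h5p`; later accesses return the cached module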
Repository URL: https://bitbucket.org/yt_analysis/yt/